backtrace_test.cpp

/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE 1
#include <dirent.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>  // Needed for mmap/mprotect used below; not in the scraped include list.
#include <sys/ptrace.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <ucontext.h>
#include <unistd.h>

#include <algorithm>
#include <list>
#include <memory>
#include <ostream>
#include <string>
#include <vector>

#include <backtrace/Backtrace.h>
#include <backtrace/BacktraceMap.h>

#include <android-base/macros.h>
#include <android-base/stringprintf.h>
#include <android-base/test_utils.h>
#include <android-base/threads.h>
#include <android-base/unique_fd.h>
#include <cutils/atomic.h>
#include <gtest/gtest.h>

// For the THREAD_SIGNAL definition.
#include "BacktraceCurrent.h"
#include "BacktraceTest.h"
#include "backtrace_testlib.h"

// Number of microseconds per millisecond.
#define US_PER_MSEC 1000

// Number of nanoseconds in a second.
#define NS_PER_SEC 1000000000ULL

// Number of simultaneous dumping operations to perform.
#define NUM_THREADS 40

// Number of simultaneous threads running in our forked process.
#define NUM_PTRACE_THREADS 5

// The list of shared libraries that make up the backtrace library.
static std::vector<std::string> kBacktraceLibs{"libunwindstack.so", "libbacktrace.so"};

struct thread_t {
  pid_t tid;
  int32_t state;
  pthread_t threadId;
  void* data;
};

struct dump_thread_t {
  thread_t thread;
  BacktraceMap* map;
  Backtrace* backtrace;
  int32_t* now;
  int32_t done;
};

typedef Backtrace* (*create_func_t)(pid_t, pid_t, BacktraceMap*);
typedef BacktraceMap* (*map_create_func_t)(pid_t, bool);

static void VerifyLevelDump(Backtrace* backtrace, create_func_t create_func = nullptr,
                            map_create_func_t map_func = nullptr);
static void VerifyMaxDump(Backtrace* backtrace, create_func_t create_func = nullptr,
                          map_create_func_t map_func = nullptr);

void* BacktraceTest::dl_handle_;
int (*BacktraceTest::test_level_one_)(int, int, int, int, void (*)(void*), void*);
int (*BacktraceTest::test_level_two_)(int, int, int, int, void (*)(void*), void*);
int (*BacktraceTest::test_level_three_)(int, int, int, int, void (*)(void*), void*);
int (*BacktraceTest::test_level_four_)(int, int, int, int, void (*)(void*), void*);
int (*BacktraceTest::test_recursive_call_)(int, void (*)(void*), void*);
void (*BacktraceTest::test_get_context_and_wait_)(void*, volatile int*);
void (*BacktraceTest::test_signal_action_)(int, siginfo_t*, void*);
void (*BacktraceTest::test_signal_handler_)(int);

extern "C" bool GetInitialArgs(const char*** args, size_t* num_args) {
  static const char* initial_args[] = {"--slow_threshold_ms=8000", "--deadline_threshold_ms=15000"};
  *args = initial_args;
  *num_args = 2;
  return true;
}

static uint64_t NanoTime() {
  struct timespec t = {0, 0};
  clock_gettime(CLOCK_MONOTONIC, &t);
  return static_cast<uint64_t>(t.tv_sec * NS_PER_SEC + t.tv_nsec);
}

static std::string DumpFrames(Backtrace* backtrace) {
  if (backtrace->NumFrames() == 0) {
    return " No frames to dump.\n";
  }

  std::string frame;
  for (size_t i = 0; i < backtrace->NumFrames(); i++) {
    frame += " " + backtrace->FormatFrameData(i) + '\n';
  }
  return frame;
}
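
// PTRACE_GETSIGINFO fails with ESRCH until the tracee has actually stopped
// (and with EINTR if the call is interrupted), so poll it until it succeeds
// or a one second timeout expires.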
static void WaitForStop(pid_t pid) {
  uint64_t start = NanoTime();

  siginfo_t si;
  while (ptrace(PTRACE_GETSIGINFO, pid, 0, &si) < 0 && (errno == EINTR || errno == ESRCH)) {
    if ((NanoTime() - start) > NS_PER_SEC) {
      printf("The process did not get to a stopping point in 1 second.\n");
      break;
    }
    usleep(US_PER_MSEC);
  }
}

static void CreateRemoteProcess(pid_t* pid) {
  if ((*pid = fork()) == 0) {
    while (true)
      ;
    _exit(0);
  }
  ASSERT_NE(-1, *pid);

  ASSERT_TRUE(ptrace(PTRACE_ATTACH, *pid, 0, 0) == 0);

  // Wait for the process to get to a stopping point.
  WaitForStop(*pid);
}

static void FinishRemoteProcess(pid_t pid) {
  ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);
}

#if !defined(__ANDROID__) || defined(__arm__)
// On host and arm target we aren't guaranteed that we will terminate cleanly.
#define VERIFY_NO_ERROR(error_code)                               \
  ASSERT_TRUE(error_code == BACKTRACE_UNWIND_NO_ERROR ||          \
              error_code == BACKTRACE_UNWIND_ERROR_UNWIND_INFO || \
              error_code == BACKTRACE_UNWIND_ERROR_MAP_MISSING)   \
      << "Unknown error code " << std::to_string(error_code);
#else
#define VERIFY_NO_ERROR(error_code) ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, error_code);
#endif

static bool ReadyLevelBacktrace(Backtrace* backtrace) {
  // See if test_level_four is in the backtrace.
  bool found = false;
  for (Backtrace::const_iterator it = backtrace->begin(); it != backtrace->end(); ++it) {
    if (it->func_name == "test_level_four") {
      found = true;
      break;
    }
  }
  return found;
}

static void VerifyLevelDump(Backtrace* backtrace, create_func_t, map_create_func_t) {
  ASSERT_GT(backtrace->NumFrames(), static_cast<size_t>(0)) << DumpFrames(backtrace);
  ASSERT_LT(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
      << DumpFrames(backtrace);

  // Look through the frames starting at the highest to find the
  // frame we want.
  size_t frame_num = 0;
  for (size_t i = backtrace->NumFrames() - 1; i > 2; i--) {
    if (backtrace->GetFrame(i)->func_name == "test_level_one") {
      frame_num = i;
      break;
    }
  }
  ASSERT_LT(static_cast<size_t>(0), frame_num) << DumpFrames(backtrace);
  ASSERT_LE(static_cast<size_t>(3), frame_num) << DumpFrames(backtrace);

  ASSERT_EQ(backtrace->GetFrame(frame_num)->func_name, "test_level_one") << DumpFrames(backtrace);
  ASSERT_EQ(backtrace->GetFrame(frame_num - 1)->func_name, "test_level_two")
      << DumpFrames(backtrace);
  ASSERT_EQ(backtrace->GetFrame(frame_num - 2)->func_name, "test_level_three")
      << DumpFrames(backtrace);
  ASSERT_EQ(backtrace->GetFrame(frame_num - 3)->func_name, "test_level_four")
      << DumpFrames(backtrace);
}

static void VerifyLevelBacktrace(void*) {
  std::unique_ptr<Backtrace> backtrace(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));
  VERIFY_NO_ERROR(backtrace->GetError().error_code);

  VerifyLevelDump(backtrace.get());
}

static bool ReadyMaxBacktrace(Backtrace* backtrace) {
  return (backtrace->NumFrames() == MAX_BACKTRACE_FRAMES);
}

static void VerifyMaxDump(Backtrace* backtrace, create_func_t, map_create_func_t) {
  ASSERT_EQ(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
      << DumpFrames(backtrace);
  // Verify that the last frame is our recursive call.
  ASSERT_EQ(backtrace->GetFrame(MAX_BACKTRACE_FRAMES - 1)->func_name, "test_recursive_call")
      << DumpFrames(backtrace);
}

static void VerifyMaxBacktrace(void*) {
  std::unique_ptr<Backtrace> backtrace(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_ERROR_EXCEED_MAX_FRAMES_LIMIT, backtrace->GetError().error_code);

  VerifyMaxDump(backtrace.get());
}
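
// Runs on the target thread once it reaches the expected call depth: publish
// readiness through the state flag, then spin until the test clears the flag
// to release the thread.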
static void ThreadSetState(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);
  android_atomic_acquire_store(1, &thread->state);
  volatile int i = 0;
  while (thread->state) {
    i++;
  }
}

static bool WaitForNonZero(int32_t* value, uint64_t seconds) {
  uint64_t start = NanoTime();
  do {
    if (android_atomic_acquire_load(value)) {
      return true;
    }
  } while ((NanoTime() - start) < seconds * NS_PER_SEC);
  return false;
}

TEST_F(BacktraceTest, local_no_unwind_frames) {
  // Verify that a local unwind does not include any frames within
  // libunwind or libbacktrace.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), getpid()));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));
  VERIFY_NO_ERROR(backtrace->GetError().error_code);

  ASSERT_TRUE(backtrace->NumFrames() != 0);
  // None of the frames should be in the backtrace libraries.
  for (const auto& frame : *backtrace) {
    if (BacktraceMap::IsValid(frame.map)) {
      const std::string name = basename(frame.map.name.c_str());
      for (const auto& lib : kBacktraceLibs) {
        ASSERT_TRUE(name != lib) << DumpFrames(backtrace.get());
      }
    }
  }
}

TEST_F(BacktraceTest, local_unwind_frames) {
  // Verify that a local unwind with the skip frames disabled does include
  // frames within the backtrace libraries.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), getpid()));
  ASSERT_TRUE(backtrace.get() != nullptr);
  backtrace->SetSkipFrames(false);
  ASSERT_TRUE(backtrace->Unwind(0));
  VERIFY_NO_ERROR(backtrace->GetError().error_code);

  ASSERT_TRUE(backtrace->NumFrames() != 0);
  size_t first_frame_non_backtrace_lib = 0;
  for (const auto& frame : *backtrace) {
    if (BacktraceMap::IsValid(frame.map)) {
      const std::string name = basename(frame.map.name.c_str());
      bool found = false;
      for (const auto& lib : kBacktraceLibs) {
        if (name == lib) {
          found = true;
          break;
        }
      }
      if (!found) {
        first_frame_non_backtrace_lib = frame.num;
        break;
      }
    }
  }

  ASSERT_NE(0U, first_frame_non_backtrace_lib)
      << "No frames found in backtrace libraries:\n"
      << DumpFrames(backtrace.get());
}

TEST_F(BacktraceTest, local_trace) {
  ASSERT_NE(test_level_one_(1, 2, 3, 4, VerifyLevelBacktrace, nullptr), 0);
}
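
// Each frame ignored by Unwind() should drop exactly one frame from the top
// of the backtrace and leave the rest identical, so the traces are compared
// pairwise with offsets of one and two frames. When cur_proc is non-null, the
// comparison only starts after that function's frame, since the innermost
// frames differ between the separate unwind calls.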
static void VerifyIgnoreFrames(Backtrace* bt_all, Backtrace* bt_ign1, Backtrace* bt_ign2,
                               const char* cur_proc) {
  ASSERT_EQ(bt_all->NumFrames(), bt_ign1->NumFrames() + 1)
      << "All backtrace:\n"
      << DumpFrames(bt_all) << "Ignore 1 backtrace:\n"
      << DumpFrames(bt_ign1);
  ASSERT_EQ(bt_all->NumFrames(), bt_ign2->NumFrames() + 2)
      << "All backtrace:\n"
      << DumpFrames(bt_all) << "Ignore 2 backtrace:\n"
      << DumpFrames(bt_ign2);

  // Check that all of the frames above the current frame are the same.
  bool check = (cur_proc == nullptr);
  for (size_t i = 0; i < bt_ign2->NumFrames(); i++) {
    if (check) {
      EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_ign1->GetFrame(i + 1)->pc);
      EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_ign1->GetFrame(i + 1)->sp);
      EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_ign1->GetFrame(i + 1)->stack_size);

      EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_all->GetFrame(i + 2)->pc);
      EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_all->GetFrame(i + 2)->sp);
      EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_all->GetFrame(i + 2)->stack_size);
    }
    if (!check && bt_ign2->GetFrame(i)->func_name == cur_proc) {
      check = true;
    }
  }
}

static void VerifyLevelIgnoreFrames(void*) {
  std::unique_ptr<Backtrace> all(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(all.get() != nullptr);
  ASSERT_TRUE(all->Unwind(0));
  VERIFY_NO_ERROR(all->GetError().error_code);

  std::unique_ptr<Backtrace> ign1(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(ign1.get() != nullptr);
  ASSERT_TRUE(ign1->Unwind(1));
  VERIFY_NO_ERROR(ign1->GetError().error_code);

  std::unique_ptr<Backtrace> ign2(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(ign2.get() != nullptr);
  ASSERT_TRUE(ign2->Unwind(2));
  VERIFY_NO_ERROR(ign2->GetError().error_code);

  VerifyIgnoreFrames(all.get(), ign1.get(), ign2.get(), "VerifyLevelIgnoreFrames");
}

TEST_F(BacktraceTest, local_trace_ignore_frames) {
  ASSERT_NE(test_level_one_(1, 2, 3, 4, VerifyLevelIgnoreFrames, nullptr), 0);
}

TEST_F(BacktraceTest, local_max_trace) {
  ASSERT_NE(test_recursive_call_(MAX_BACKTRACE_FRAMES + 10, VerifyMaxBacktrace, nullptr), 0);
}
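
// Repeatedly attach to the target, unwind, and check whether the expected
// frames are present yet. The target may not have reached the interesting
// call depth on early attempts, so detach and retry for up to five seconds
// before declaring failure.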
static void VerifyProcTest(pid_t pid, pid_t tid, bool (*ReadyFunc)(Backtrace*),
                           void (*VerifyFunc)(Backtrace*, create_func_t, map_create_func_t),
                           create_func_t create_func, map_create_func_t map_create_func) {
  pid_t ptrace_tid;
  if (tid < 0) {
    ptrace_tid = pid;
  } else {
    ptrace_tid = tid;
  }

  uint64_t start = NanoTime();
  bool verified = false;
  std::string last_dump;
  do {
    usleep(US_PER_MSEC);
    if (ptrace(PTRACE_ATTACH, ptrace_tid, 0, 0) == 0) {
      // Wait for the process to get to a stopping point.
      WaitForStop(ptrace_tid);

      std::unique_ptr<BacktraceMap> map;
      map.reset(map_create_func(pid, false));
      std::unique_ptr<Backtrace> backtrace(create_func(pid, tid, map.get()));
      ASSERT_TRUE(backtrace.get() != nullptr);
      ASSERT_TRUE(backtrace->Unwind(0));
      if (ReadyFunc(backtrace.get())) {
        VerifyFunc(backtrace.get(), create_func, map_create_func);
        verified = true;
      } else {
        last_dump = DumpFrames(backtrace.get());
      }

      ASSERT_TRUE(ptrace(PTRACE_DETACH, ptrace_tid, 0, 0) == 0);
    }
    // If 5 seconds have passed, then we are done.
  } while (!verified && (NanoTime() - start) <= 5 * NS_PER_SEC);
  ASSERT_TRUE(verified) << "Last backtrace:\n" << last_dump;
}

TEST_F(BacktraceTest, ptrace_trace) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one_(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, ReadyLevelBacktrace, VerifyLevelDump,
                 Backtrace::Create, BacktraceMap::Create);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}

TEST_F(BacktraceTest, ptrace_max_trace) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_recursive_call_(MAX_BACKTRACE_FRAMES + 10, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, ReadyMaxBacktrace, VerifyMaxDump,
                 Backtrace::Create, BacktraceMap::Create);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}

static void VerifyProcessIgnoreFrames(Backtrace* bt_all, create_func_t create_func,
                                      map_create_func_t map_create_func) {
  std::unique_ptr<BacktraceMap> map(map_create_func(bt_all->Pid(), false));
  std::unique_ptr<Backtrace> ign1(create_func(bt_all->Pid(), BACKTRACE_CURRENT_THREAD, map.get()));
  ASSERT_TRUE(ign1.get() != nullptr);
  ASSERT_TRUE(ign1->Unwind(1));
  VERIFY_NO_ERROR(ign1->GetError().error_code);

  std::unique_ptr<Backtrace> ign2(create_func(bt_all->Pid(), BACKTRACE_CURRENT_THREAD, map.get()));
  ASSERT_TRUE(ign2.get() != nullptr);
  ASSERT_TRUE(ign2->Unwind(2));
  VERIFY_NO_ERROR(ign2->GetError().error_code);

  VerifyIgnoreFrames(bt_all, ign1.get(), ign2.get(), nullptr);
}

TEST_F(BacktraceTest, ptrace_ignore_frames) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one_(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, ReadyLevelBacktrace, VerifyProcessIgnoreFrames,
                 Backtrace::Create, BacktraceMap::Create);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}

// Create a process with multiple threads and dump all of the threads.
static void* PtraceThreadLevelRun(void*) {
  EXPECT_NE(BacktraceTest::test_level_one_(1, 2, 3, 4, nullptr, nullptr), 0);
  return nullptr;
}

static void GetThreads(pid_t pid, std::vector<pid_t>* threads) {
  // Get the list of tasks.
  char task_path[128];
  snprintf(task_path, sizeof(task_path), "/proc/%d/task", pid);

  std::unique_ptr<DIR, decltype(&closedir)> tasks_dir(opendir(task_path), closedir);
  ASSERT_TRUE(tasks_dir != nullptr);
  struct dirent* entry;
  while ((entry = readdir(tasks_dir.get())) != nullptr) {
    char* end;
    pid_t tid = strtoul(entry->d_name, &end, 10);
    if (*end == '\0') {
      threads->push_back(tid);
    }
  }
}

TEST_F(BacktraceTest, ptrace_threads) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    for (size_t i = 0; i < NUM_PTRACE_THREADS; i++) {
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

      pthread_t thread;
      ASSERT_TRUE(pthread_create(&thread, &attr, PtraceThreadLevelRun, nullptr) == 0);
    }
    ASSERT_NE(test_level_one_(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }

  // Check to see that all of the threads are running before unwinding.
  std::vector<pid_t> threads;
  uint64_t start = NanoTime();
  do {
    usleep(US_PER_MSEC);
    threads.clear();
    GetThreads(pid, &threads);
  } while ((threads.size() != NUM_PTRACE_THREADS + 1) &&
           ((NanoTime() - start) <= 5 * NS_PER_SEC));
  ASSERT_EQ(threads.size(), static_cast<size_t>(NUM_PTRACE_THREADS + 1));

  ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);
  WaitForStop(pid);
  for (std::vector<pid_t>::const_iterator it = threads.begin(); it != threads.end(); ++it) {
    // Skip the forked process itself; we only care about its threads.
    if (pid == *it) {
      continue;
    }
    VerifyProcTest(pid, *it, ReadyLevelBacktrace, VerifyLevelDump, Backtrace::Create,
                   BacktraceMap::Create);
  }

  FinishRemoteProcess(pid);
}

void VerifyLevelThread(void*) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), android::base::GetThreadId()));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));
  VERIFY_NO_ERROR(backtrace->GetError().error_code);

  VerifyLevelDump(backtrace.get());
}

TEST_F(BacktraceTest, thread_current_level) {
  ASSERT_NE(test_level_one_(1, 2, 3, 4, VerifyLevelThread, nullptr), 0);
}

static void VerifyMaxThread(void*) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), android::base::GetThreadId()));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_ERROR_EXCEED_MAX_FRAMES_LIMIT, backtrace->GetError().error_code);

  VerifyMaxDump(backtrace.get());
}

TEST_F(BacktraceTest, thread_current_max) {
  ASSERT_NE(test_recursive_call_(MAX_BACKTRACE_FRAMES + 10, VerifyMaxThread, nullptr), 0);
}

static void* ThreadLevelRun(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);

  thread->tid = android::base::GetThreadId();
  EXPECT_NE(BacktraceTest::test_level_one_(1, 2, 3, 4, ThreadSetState, data), 0);
  return nullptr;
}

TEST_F(BacktraceTest, thread_level_trace) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = {0, 0, 0, nullptr};

  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  // Make sure that the thread signal used is not visible when compiled for
  // the target.
#if !defined(__GLIBC__)
  ASSERT_LT(THREAD_SIGNAL, SIGRTMIN);
#endif

  // Save the current signal action and make sure it is restored afterwards.
  struct sigaction cur_action;
  ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &cur_action) == 0);

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));
  VERIFY_NO_ERROR(backtrace->GetError().error_code);

  VerifyLevelDump(backtrace.get());

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);

  // Verify that the old action was restored.
  struct sigaction new_action;
  ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &new_action) == 0);
  EXPECT_EQ(cur_action.sa_sigaction, new_action.sa_sigaction);

  // The SA_RESTORER flag gets set behind our back, so a direct comparison
  // doesn't work unless we mask the value off. Mips doesn't have this
  // flag, so skip this on that platform.
#if defined(SA_RESTORER)
  cur_action.sa_flags &= ~SA_RESTORER;
  new_action.sa_flags &= ~SA_RESTORER;
#elif defined(__GLIBC__)
  // Our host compiler doesn't appear to define this flag for some reason.
  cur_action.sa_flags &= ~0x04000000;
  new_action.sa_flags &= ~0x04000000;
#endif
  EXPECT_EQ(cur_action.sa_flags, new_action.sa_flags);
}

TEST_F(BacktraceTest, thread_ignore_frames) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = {0, 0, 0, nullptr};

  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  std::unique_ptr<Backtrace> all(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(all.get() != nullptr);
  ASSERT_TRUE(all->Unwind(0));
  VERIFY_NO_ERROR(all->GetError().error_code);

  std::unique_ptr<Backtrace> ign1(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(ign1.get() != nullptr);
  ASSERT_TRUE(ign1->Unwind(1));
  VERIFY_NO_ERROR(ign1->GetError().error_code);

  std::unique_ptr<Backtrace> ign2(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(ign2.get() != nullptr);
  ASSERT_TRUE(ign2->Unwind(2));
  VERIFY_NO_ERROR(ign2->GetError().error_code);

  VerifyIgnoreFrames(all.get(), ign1.get(), ign2.get(), nullptr);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);
}

static void* ThreadMaxRun(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);

  thread->tid = android::base::GetThreadId();
  EXPECT_NE(BacktraceTest::test_recursive_call_(MAX_BACKTRACE_FRAMES + 10, ThreadSetState, data),
            0);
  return nullptr;
}

TEST_F(BacktraceTest, thread_max_trace) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = {0, 0, 0, nullptr};

  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadMaxRun, &thread_data) == 0);

  // Wait for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_ERROR_EXCEED_MAX_FRAMES_LIMIT, backtrace->GetError().error_code);

  VerifyMaxDump(backtrace.get());

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);
}

static void* ThreadDump(void* data) {
  dump_thread_t* dump = reinterpret_cast<dump_thread_t*>(data);
  while (true) {
    if (android_atomic_acquire_load(dump->now)) {
      break;
    }
  }

  // The status of the actual unwind will be checked elsewhere.
  dump->backtrace = Backtrace::Create(getpid(), dump->thread.tid, dump->map);
  dump->backtrace->Unwind(0);

  android_atomic_acquire_store(1, &dump->done);
  return nullptr;
}
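
// Stress test: NUM_THREADS dumper threads each unwind a distinct runner
// thread at the same time. When share_map is true, all dumpers also share a
// single BacktraceMap, exercising the map's behavior under concurrent use.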
static void MultipleThreadDumpTest(bool share_map) {
  // Dump NUM_THREADS simultaneously using the same map.
  std::vector<thread_t> runners(NUM_THREADS);
  std::vector<dump_thread_t> dumpers(NUM_THREADS);

  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  for (size_t i = 0; i < NUM_THREADS; i++) {
    // Launch the runners, they will spin in hard loops doing nothing.
    runners[i].tid = 0;
    runners[i].state = 0;
    ASSERT_TRUE(pthread_create(&runners[i].threadId, &attr, ThreadMaxRun, &runners[i]) == 0);
  }

  // Wait for tids to be set.
  for (std::vector<thread_t>::iterator it = runners.begin(); it != runners.end(); ++it) {
    ASSERT_TRUE(WaitForNonZero(&it->state, 30));
  }

  // Start all of the dumpers at once, they will spin until they are signalled
  // to begin their dump run.
  std::unique_ptr<BacktraceMap> map;
  if (share_map) {
    map.reset(BacktraceMap::Create(getpid()));
  }
  int32_t dump_now = 0;
  for (size_t i = 0; i < NUM_THREADS; i++) {
    dumpers[i].thread.tid = runners[i].tid;
    dumpers[i].thread.state = 0;
    dumpers[i].done = 0;
    dumpers[i].now = &dump_now;
    dumpers[i].map = map.get();

    ASSERT_TRUE(pthread_create(&dumpers[i].thread.threadId, &attr, ThreadDump, &dumpers[i]) == 0);
  }

  // Start all of the dumpers going at once.
  android_atomic_acquire_store(1, &dump_now);

  for (size_t i = 0; i < NUM_THREADS; i++) {
    ASSERT_TRUE(WaitForNonZero(&dumpers[i].done, 30));

    // Tell the runner thread to exit its infinite loop.
    android_atomic_acquire_store(0, &runners[i].state);

    ASSERT_TRUE(dumpers[i].backtrace != nullptr);
    VerifyMaxDump(dumpers[i].backtrace);

    delete dumpers[i].backtrace;
    dumpers[i].backtrace = nullptr;
  }
}

TEST_F(BacktraceTest, thread_multiple_dump) {
  MultipleThreadDumpTest(false);
}

TEST_F(BacktraceTest, thread_multiple_dump_same_map) {
  MultipleThreadDumpTest(true);
}

// This test is for UnwindMaps that should share the same map cursor when
// multiple maps are created for the current process at the same time.
TEST_F(BacktraceTest, simultaneous_maps) {
  BacktraceMap* map1 = BacktraceMap::Create(getpid());
  BacktraceMap* map2 = BacktraceMap::Create(getpid());
  BacktraceMap* map3 = BacktraceMap::Create(getpid());

  Backtrace* back1 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map1);
  ASSERT_TRUE(back1 != nullptr);
  EXPECT_TRUE(back1->Unwind(0));
  VERIFY_NO_ERROR(back1->GetError().error_code);
  delete back1;
  delete map1;

  Backtrace* back2 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map2);
  ASSERT_TRUE(back2 != nullptr);
  EXPECT_TRUE(back2->Unwind(0));
  VERIFY_NO_ERROR(back2->GetError().error_code);
  delete back2;
  delete map2;

  Backtrace* back3 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map3);
  ASSERT_TRUE(back3 != nullptr);
  EXPECT_TRUE(back3->Unwind(0));
  VERIFY_NO_ERROR(back3->GetError().error_code);
  delete back3;
  delete map3;
}
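
// FillIn() with an address that lies in no map should reset every field of
// the passed-in map entry, even when the entry held stale data beforehand.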
TEST_F(BacktraceTest, fillin_erases) {
  BacktraceMap* back_map = BacktraceMap::Create(getpid());

  backtrace_map_t map;

  map.start = 1;
  map.end = 3;
  map.flags = 1;
  map.name = "Initialized";
  back_map->FillIn(0, &map);
  delete back_map;

  ASSERT_FALSE(BacktraceMap::IsValid(map));
  ASSERT_EQ(static_cast<uint64_t>(0), map.start);
  ASSERT_EQ(static_cast<uint64_t>(0), map.end);
  ASSERT_EQ(0, map.flags);
  ASSERT_EQ("", map.name);
}

TEST_F(BacktraceTest, format_test) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);

  backtrace_frame_data_t frame;
  frame.num = 1;
  frame.pc = 2;
  frame.rel_pc = 2;
  frame.sp = 0;
  frame.stack_size = 0;
  frame.func_offset = 0;

  // Check no map set.
  frame.num = 1;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000002 <unknown>",
#else
  EXPECT_EQ("#01 pc 00000002 <unknown>",
#endif
            backtrace->FormatFrameData(&frame));

  // Check map name empty, but exists.
  frame.pc = 0xb0020;
  frame.rel_pc = 0x20;
  frame.map.start = 0xb0000;
  frame.map.end = 0xbffff;
  frame.map.load_bias = 0;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000020 <anonymous:00000000000b0000>",
#else
  EXPECT_EQ("#01 pc 00000020 <anonymous:000b0000>",
#endif
            backtrace->FormatFrameData(&frame));

  // Check map name begins with a [.
  frame.pc = 0xc0020;
  frame.map.start = 0xc0000;
  frame.map.end = 0xcffff;
  frame.map.load_bias = 0;
  frame.map.name = "[anon:thread signal stack]";
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000020 [anon:thread signal stack:00000000000c0000]",
#else
  EXPECT_EQ("#01 pc 00000020 [anon:thread signal stack:000c0000]",
#endif
            backtrace->FormatFrameData(&frame));

  // Check relative pc is set and map name is set.
  frame.pc = 0x12345679;
  frame.rel_pc = 0x12345678;
  frame.map.name = "MapFake";
  frame.map.start = 1;
  frame.map.end = 1;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678 MapFake",
#else
  EXPECT_EQ("#01 pc 12345678 MapFake",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, but no func offset.
  frame.func_name = "ProcFake";
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678 MapFake (ProcFake)",
#else
  EXPECT_EQ("#01 pc 12345678 MapFake (ProcFake)",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, and func offset is non-zero.
  frame.func_offset = 645;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678 MapFake (ProcFake+645)",
#else
  EXPECT_EQ("#01 pc 12345678 MapFake (ProcFake+645)",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, func offset is non-zero, and load_bias is non-zero.
  frame.rel_pc = 0x123456dc;
  frame.func_offset = 645;
  frame.map.load_bias = 100;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 00000000123456dc MapFake (ProcFake+645)",
#else
  EXPECT_EQ("#01 pc 123456dc MapFake (ProcFake+645)",
#endif
            backtrace->FormatFrameData(&frame));

  // Check a non-zero map offset.
  frame.map.offset = 0x1000;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 00000000123456dc MapFake (offset 0x1000) (ProcFake+645)",
#else
  EXPECT_EQ("#01 pc 123456dc MapFake (offset 0x1000) (ProcFake+645)",
#endif
            backtrace->FormatFrameData(&frame));
}

struct map_test_t {
  uint64_t start;
  uint64_t end;
};

static bool map_sort(map_test_t i, map_test_t j) { return i.start < j.start; }

static std::string GetTestMapsAsString(const std::vector<map_test_t>& maps) {
  if (maps.size() == 0) {
    return "No test map entries\n";
  }
  std::string map_txt;
  for (auto map : maps) {
    map_txt += android::base::StringPrintf("%" PRIx64 "-%" PRIx64 "\n", map.start, map.end);
  }
  return map_txt;
}

static std::string GetMapsAsString(BacktraceMap* maps) {
  if (maps->size() == 0) {
    return "No map entries\n";
  }
  std::string map_txt;
  for (const backtrace_map_t* map : *maps) {
    map_txt += android::base::StringPrintf(
        "%" PRIx64 "-%" PRIx64 " flags: 0x%x offset: 0x%" PRIx64 " load_bias: 0x%" PRIx64,
        map->start, map->end, map->flags, map->offset, map->load_bias);
    if (!map->name.empty()) {
      map_txt += ' ' + map->name;
    }
    map_txt += '\n';
  }
  return map_txt;
}

static void VerifyMap(pid_t pid) {
  char buffer[4096];
  snprintf(buffer, sizeof(buffer), "/proc/%d/maps", pid);

  FILE* map_file = fopen(buffer, "r");
  ASSERT_TRUE(map_file != nullptr);
  std::vector<map_test_t> test_maps;
  while (fgets(buffer, sizeof(buffer), map_file)) {
    map_test_t map;
    ASSERT_EQ(2, sscanf(buffer, "%" SCNx64 "-%" SCNx64 " ", &map.start, &map.end));
    test_maps.push_back(map);
  }
  fclose(map_file);
  std::sort(test_maps.begin(), test_maps.end(), map_sort);

  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(pid));

  // Basic test that verifies that the map is in the expected order.
  auto test_it = test_maps.begin();
  for (auto it = map->begin(); it != map->end(); ++it) {
    ASSERT_TRUE(test_it != test_maps.end()) << "Mismatch in number of maps, expected test maps:\n"
                                            << GetTestMapsAsString(test_maps) << "Actual maps:\n"
                                            << GetMapsAsString(map.get());
    ASSERT_EQ(test_it->start, (*it)->start) << "Mismatch in map data, expected test maps:\n"
                                            << GetTestMapsAsString(test_maps) << "Actual maps:\n"
                                            << GetMapsAsString(map.get());
    ASSERT_EQ(test_it->end, (*it)->end) << "Mismatch in map data, expected test maps:\n"
                                        << GetTestMapsAsString(test_maps) << "Actual maps:\n"
                                        << GetMapsAsString(map.get());
    // Make sure the load bias gets set to a value.
    ASSERT_NE(static_cast<uint64_t>(-1), (*it)->load_bias) << "Found uninitialized load_bias\n"
                                                           << GetMapsAsString(map.get());
    ++test_it;
  }
  ASSERT_TRUE(test_it == test_maps.end());
}

TEST_F(BacktraceTest, verify_map_remote) {
  pid_t pid;
  CreateRemoteProcess(&pid);

  // The maps should match exactly since the forked process has been paused.
  VerifyMap(pid);

  FinishRemoteProcess(pid);
}

static void InitMemory(uint8_t* memory, size_t bytes) {
  for (size_t i = 0; i < bytes; i++) {
    memory[i] = i;
    if (memory[i] == '\0') {
      // Don't use '\0' in our data so we can verify that an overread doesn't
      // occur by using a '\0' as the character after the read data.
      memory[i] = 23;
    }
  }
}

static void* ThreadReadTest(void* data) {
  thread_t* thread_data = reinterpret_cast<thread_t*>(data);

  thread_data->tid = android::base::GetThreadId();

  // Create two map pages.
  // Mark the second page as not-readable.
  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  uint8_t* memory;
  if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) {
    return reinterpret_cast<void*>(-1);
  }

  if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) {
    return reinterpret_cast<void*>(-1);
  }

  // Set up a simple pattern in memory.
  InitMemory(memory, pagesize);

  thread_data->data = memory;

  // Tell the caller it's okay to start reading memory.
  android_atomic_acquire_store(1, &thread_data->state);

  // Loop waiting for the caller to finish reading the memory.
  while (thread_data->state) {
  }

  // Re-enable read-write on the page so that we don't crash if we try
  // and access data on this page when freeing the memory.
  if (mprotect(&memory[pagesize], pagesize, PROT_READ | PROT_WRITE) != 0) {
    return reinterpret_cast<void*>(-1);
  }
  free(memory);

  android_atomic_acquire_store(1, &thread_data->state);

  return nullptr;
}
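
// read_addr points at one readable page followed by one PROT_NONE page.
// Verify that Read() stops at the protection boundary, handles unaligned
// start addresses, and never writes past the requested length on short reads.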
static void RunReadTest(Backtrace* backtrace, uint64_t read_addr) {
  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));

  // Create a page of data to use to do quick compares.
  uint8_t* expected = new uint8_t[pagesize];
  InitMemory(expected, pagesize);

  uint8_t* data = new uint8_t[2 * pagesize];
  // Verify that we can only read one page worth of data.
  size_t bytes_read = backtrace->Read(read_addr, data, 2 * pagesize);
  ASSERT_EQ(pagesize, bytes_read);
  ASSERT_TRUE(memcmp(data, expected, pagesize) == 0);

  // Verify unaligned reads.
  for (size_t i = 1; i < sizeof(word_t); i++) {
    bytes_read = backtrace->Read(read_addr + i, data, 2 * sizeof(word_t));
    ASSERT_EQ(2 * sizeof(word_t), bytes_read);
    ASSERT_TRUE(memcmp(data, &expected[i], 2 * sizeof(word_t)) == 0)
        << "Offset at " << i << " failed";
  }

  // Verify small unaligned reads.
  for (size_t i = 1; i < sizeof(word_t); i++) {
    for (size_t j = 1; j < sizeof(word_t); j++) {
      // Set one byte past what we expect to read, to guarantee we don't overread.
      data[j] = '\0';
      bytes_read = backtrace->Read(read_addr + i, data, j);
      ASSERT_EQ(j, bytes_read);
      ASSERT_TRUE(memcmp(data, &expected[i], j) == 0)
          << "Offset at " << i << " length " << j << " miscompared";
      ASSERT_EQ('\0', data[j]) << "Offset at " << i << " length " << j << " wrote too much data";
    }
  }
  delete[] data;
  delete[] expected;
}

TEST_F(BacktraceTest, thread_read) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  pthread_t thread;
  thread_t thread_data = {0, 0, 0, nullptr};
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadReadTest, &thread_data) == 0);

  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 10));

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);

  RunReadTest(backtrace.get(), reinterpret_cast<uint64_t>(thread_data.data));

  android_atomic_acquire_store(0, &thread_data.state);

  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 10));
}

// The code requires these variables are the same size.
volatile uint64_t g_ready = 0;
volatile uint64_t g_addr = 0;
static_assert(sizeof(g_ready) == sizeof(g_addr), "g_ready/g_addr must be same size");
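
// The forked child publishes the address of its test pages through g_addr and
// signals readiness through g_ready; the parent pulls both values out of the
// stopped child with Backtrace::Read() before running the read tests.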
static void ForkedReadTest() {
  // Create two map pages.
  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  uint8_t* memory;
  if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) {
    perror("Failed to allocate memory\n");
    exit(1);
  }

  // Mark the second page as not-readable.
  if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) {
    perror("Failed to mprotect memory\n");
    exit(1);
  }

  // Set up a simple pattern in memory.
  InitMemory(memory, pagesize);

  g_addr = reinterpret_cast<uint64_t>(memory);
  g_ready = 1;

  while (1) {
    usleep(US_PER_MSEC);
  }
}

TEST_F(BacktraceTest, process_read) {
  g_ready = 0;
  pid_t pid;
  if ((pid = fork()) == 0) {
    ForkedReadTest();
    exit(0);
  }
  ASSERT_NE(-1, pid);

  bool test_executed = false;
  uint64_t start = NanoTime();
  while (1) {
    if (ptrace(PTRACE_ATTACH, pid, 0, 0) == 0) {
      WaitForStop(pid);

      std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));
      ASSERT_TRUE(backtrace.get() != nullptr);

      uint64_t read_addr;
      size_t bytes_read = backtrace->Read(reinterpret_cast<uint64_t>(&g_ready),
                                          reinterpret_cast<uint8_t*>(&read_addr), sizeof(g_ready));
      ASSERT_EQ(sizeof(g_ready), bytes_read);
      if (read_addr) {
        // The forked process is ready to be read.
        bytes_read = backtrace->Read(reinterpret_cast<uint64_t>(&g_addr),
                                     reinterpret_cast<uint8_t*>(&read_addr), sizeof(g_addr));
        ASSERT_EQ(sizeof(g_addr), bytes_read);

        RunReadTest(backtrace.get(), read_addr);

        test_executed = true;
        break;
      }
      ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);
    }
    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }
  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  ASSERT_TRUE(test_executed);
}

static void VerifyFunctionsFound(const std::vector<std::string>& found_functions) {
  // We expect to find these functions in libbacktrace_test. If we don't
  // find them, that's a bug in the memory read handling code in libunwind.
  std::list<std::string> expected_functions;
  expected_functions.push_back("test_recursive_call");
  expected_functions.push_back("test_level_one");
  expected_functions.push_back("test_level_two");
  expected_functions.push_back("test_level_three");
  expected_functions.push_back("test_level_four");
  for (const auto& found_function : found_functions) {
    for (const auto& expected_function : expected_functions) {
      if (found_function == expected_function) {
        expected_functions.remove(found_function);
        break;
      }
    }
  }
  ASSERT_TRUE(expected_functions.empty()) << "Not all functions found in shared library.";
}

static void CopySharedLibrary(const char* tmp_dir, std::string* tmp_so_name) {
  std::string test_lib(testing::internal::GetArgvs()[0]);
  auto const value = test_lib.find_last_of('/');
  if (value == std::string::npos) {
    test_lib = "../backtrace_test_libs/";
  } else {
    test_lib = test_lib.substr(0, value + 1) + "../backtrace_test_libs/";
  }
  test_lib += "libbacktrace_test.so";

  *tmp_so_name = std::string(tmp_dir) + "/libbacktrace_test.so";
  std::string cp_cmd = android::base::StringPrintf("cp %s %s", test_lib.c_str(), tmp_dir);

  // Copy the shared library to a temporary directory.
  ASSERT_EQ(0, system(cp_cmd.c_str()));
}
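
// Map the copied test library into memory, then unlink the file backing it.
// The unwinder can no longer open the ELF file from disk, so any symbol data
// GetFunctionName() returns must come from the in-memory copy.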
TEST_F(BacktraceTest, check_unreadable_elf_local) {
  TemporaryDir td;
  std::string tmp_so_name;
  ASSERT_NO_FATAL_FAILURE(CopySharedLibrary(td.path, &tmp_so_name));

  struct stat buf;
  ASSERT_TRUE(stat(tmp_so_name.c_str(), &buf) != -1);
  uint64_t map_size = buf.st_size;

  int fd = open(tmp_so_name.c_str(), O_RDONLY);
  ASSERT_TRUE(fd != -1);

  void* map = mmap(nullptr, map_size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
  ASSERT_TRUE(map != MAP_FAILED);
  close(fd);
  ASSERT_TRUE(unlink(tmp_so_name.c_str()) != -1);

  std::vector<std::string> found_functions;
  std::unique_ptr<Backtrace> backtrace(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);

  // Needed before GetFunctionName will work.
  backtrace->Unwind(0);

  // Loop through the entire map, and get every function we can find.
  map_size += reinterpret_cast<uint64_t>(map);
  std::string last_func;
  for (uint64_t read_addr = reinterpret_cast<uint64_t>(map); read_addr < map_size; read_addr += 4) {
    uint64_t offset;
    std::string func_name = backtrace->GetFunctionName(read_addr, &offset);
    if (!func_name.empty() && last_func != func_name) {
      found_functions.push_back(func_name);
    }
    last_func = func_name;
  }

  ASSERT_TRUE(munmap(map, map_size - reinterpret_cast<uint64_t>(map)) == 0);

  VerifyFunctionsFound(found_functions);
}

TEST_F(BacktraceTest, check_unreadable_elf_remote) {
  TemporaryDir td;
  std::string tmp_so_name;
  ASSERT_NO_FATAL_FAILURE(CopySharedLibrary(td.path, &tmp_so_name));

  g_ready = 0;

  struct stat buf;
  ASSERT_TRUE(stat(tmp_so_name.c_str(), &buf) != -1);
  uint64_t map_size = buf.st_size;

  pid_t pid;
  if ((pid = fork()) == 0) {
    int fd = open(tmp_so_name.c_str(), O_RDONLY);
    if (fd == -1) {
      fprintf(stderr, "Failed to open file %s: %s\n", tmp_so_name.c_str(), strerror(errno));
      unlink(tmp_so_name.c_str());
      exit(0);
    }

    void* map = mmap(nullptr, map_size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
    if (map == MAP_FAILED) {
      fprintf(stderr, "Failed to map in memory: %s\n", strerror(errno));
      unlink(tmp_so_name.c_str());
      exit(0);
    }
    close(fd);
    if (unlink(tmp_so_name.c_str()) == -1) {
      fprintf(stderr, "Failed to unlink: %s\n", strerror(errno));
      exit(0);
    }

    g_addr = reinterpret_cast<uint64_t>(map);
    g_ready = 1;
    while (true) {
      usleep(US_PER_MSEC);
    }
    exit(0);
  }
  ASSERT_TRUE(pid > 0);

  std::vector<std::string> found_functions;
  uint64_t start = NanoTime();
  while (true) {
    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    // Wait for the process to get to a stopping point.
    WaitForStop(pid);

    std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, BACKTRACE_CURRENT_THREAD));
    ASSERT_TRUE(backtrace.get() != nullptr);

    uint64_t read_addr;
    ASSERT_EQ(sizeof(g_ready),
              backtrace->Read(reinterpret_cast<uint64_t>(&g_ready),
                              reinterpret_cast<uint8_t*>(&read_addr), sizeof(g_ready)));
    if (read_addr) {
      ASSERT_EQ(sizeof(g_addr),
                backtrace->Read(reinterpret_cast<uint64_t>(&g_addr),
                                reinterpret_cast<uint8_t*>(&read_addr), sizeof(uint64_t)));

      // Needed before GetFunctionName will work.
      backtrace->Unwind(0);

      // Loop through the entire map, and get every function we can find.
      map_size += read_addr;
      std::string last_func;
      for (; read_addr < map_size; read_addr += 4) {
        uint64_t offset;
        std::string func_name = backtrace->GetFunctionName(read_addr, &offset);
        if (!func_name.empty() && last_func != func_name) {
          found_functions.push_back(func_name);
        }
        last_func = func_name;
      }
      break;
    }
    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }

  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  VerifyFunctionsFound(found_functions);
}
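
// Scan the backtrace for the first frame whose pc falls in the same map as
// test_func and at or above its address; that frame is the call into the
// unlinked test library.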
  1140. static bool FindFuncFrameInBacktrace(Backtrace* backtrace, uint64_t test_func, size_t* frame_num) {
  1141. backtrace_map_t map;
  1142. backtrace->FillInMap(test_func, &map);
  1143. if (!BacktraceMap::IsValid(map)) {
  1144. return false;
  1145. }
  1146. // Loop through the frames, and find the one that is in the map.
  1147. *frame_num = 0;
  1148. for (Backtrace::const_iterator it = backtrace->begin(); it != backtrace->end(); ++it) {
  1149. if (BacktraceMap::IsValid(it->map) && map.start == it->map.start &&
  1150. it->pc >= test_func) {
  1151. *frame_num = it->num;
  1152. return true;
  1153. }
  1154. }
  1155. return false;
  1156. }
  1157. static void VerifyUnreadableElfFrame(Backtrace* backtrace, uint64_t test_func, size_t frame_num) {
  1158. ASSERT_LT(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
  1159. << DumpFrames(backtrace);
  1160. ASSERT_TRUE(frame_num != 0) << DumpFrames(backtrace);
  1161. // Make sure that there is at least one more frame above the test func call.
  1162. ASSERT_LT(frame_num, backtrace->NumFrames()) << DumpFrames(backtrace);
  1163. uint64_t diff = backtrace->GetFrame(frame_num)->pc - test_func;
  1164. ASSERT_LT(diff, 200U) << DumpFrames(backtrace);
  1165. }
static void VerifyUnreadableElfBacktrace(void* func) {
  std::unique_ptr<Backtrace> backtrace(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));
  VERIFY_NO_ERROR(backtrace->GetError().error_code);

  size_t frame_num;
  uint64_t test_func = reinterpret_cast<uint64_t>(func);
  ASSERT_TRUE(FindFuncFrameInBacktrace(backtrace.get(), test_func, &frame_num))
      << DumpFrames(backtrace.get());

  VerifyUnreadableElfFrame(backtrace.get(), test_func, frame_num);
}
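
// Matches the signature of test_level_one in the test shared library: four
// ints, then a callback and its argument.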
typedef int (*test_func_t)(int, int, int, int, void (*)(void*), void*);

TEST_F(BacktraceTest, unwind_through_unreadable_elf_local) {
  TemporaryDir td;
  std::string tmp_so_name;
  ASSERT_NO_FATAL_FAILURE(CopySharedLibrary(td.path, &tmp_so_name));

  void* lib_handle = dlopen(tmp_so_name.c_str(), RTLD_NOW);
  ASSERT_TRUE(lib_handle != nullptr);
  ASSERT_TRUE(unlink(tmp_so_name.c_str()) != -1);

  test_func_t test_func = reinterpret_cast<test_func_t>(dlsym(lib_handle, "test_level_one"));
  ASSERT_TRUE(test_func != nullptr);

  ASSERT_NE(test_func(1, 2, 3, 4, VerifyUnreadableElfBacktrace, reinterpret_cast<void*>(test_func)),
            0);
}

TEST_F(BacktraceTest, unwind_through_unreadable_elf_remote) {
  TemporaryDir td;
  std::string tmp_so_name;
  ASSERT_NO_FATAL_FAILURE(CopySharedLibrary(td.path, &tmp_so_name));

  void* lib_handle = dlopen(tmp_so_name.c_str(), RTLD_NOW);
  ASSERT_TRUE(lib_handle != nullptr);
  ASSERT_TRUE(unlink(tmp_so_name.c_str()) != -1);

  test_func_t test_func = reinterpret_cast<test_func_t>(dlsym(lib_handle, "test_level_one"));
  ASSERT_TRUE(test_func != nullptr);

  pid_t pid;
  if ((pid = fork()) == 0) {
    test_func(1, 2, 3, 4, nullptr, nullptr);
    exit(0);
  }
  ASSERT_TRUE(pid > 0);

  uint64_t start = NanoTime();
  bool done = false;
  while (!done) {
    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    // Wait for the process to get to a stopping point.
    WaitForStop(pid);

    std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, BACKTRACE_CURRENT_THREAD));
    ASSERT_TRUE(backtrace.get() != nullptr);
    ASSERT_TRUE(backtrace->Unwind(0));
    VERIFY_NO_ERROR(backtrace->GetError().error_code);

    size_t frame_num;
    if (FindFuncFrameInBacktrace(backtrace.get(), reinterpret_cast<uint64_t>(test_func),
                                 &frame_num) &&
        frame_num != 0) {
      VerifyUnreadableElfFrame(backtrace.get(), reinterpret_cast<uint64_t>(test_func), frame_num);
      done = true;
    }

    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }

  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  ASSERT_TRUE(done) << "Test function never found in unwind.";
}

TEST_F(BacktraceTest, unwind_thread_doesnt_exist) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, 99999999));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_FALSE(backtrace->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_ERROR_THREAD_DOESNT_EXIST, backtrace->GetError().error_code);
}

TEST_F(BacktraceTest, local_get_function_name_before_unwind) {
  std::unique_ptr<Backtrace> backtrace(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);

  // Verify that trying to get a function name before doing an unwind works.
  uint64_t cur_func_offset = reinterpret_cast<uint64_t>(test_level_one_) + 1;
  uint64_t offset;
  ASSERT_NE(std::string(""), backtrace->GetFunctionName(cur_func_offset, &offset));
}

TEST_F(BacktraceTest, remote_get_function_name_before_unwind) {
  pid_t pid;
  CreateRemoteProcess(&pid);

  // Now create an unwind object.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));

  // Verify that trying to get a function name before doing an unwind works.
  uint64_t cur_func_offset = reinterpret_cast<uint64_t>(test_level_one_) + 1;
  uint64_t offset;
  ASSERT_NE(std::string(""), backtrace->GetFunctionName(cur_func_offset, &offset));

  FinishRemoteProcess(pid);
}
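
// Helpers to set architecture-specific registers in a ucontext_t, so the
// device-map tests below can hand-craft the starting point of an unwind.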
static void SetUcontextSp(uint64_t sp, ucontext_t* ucontext) {
#if defined(__arm__)
  ucontext->uc_mcontext.arm_sp = sp;
#elif defined(__aarch64__)
  ucontext->uc_mcontext.sp = sp;
#elif defined(__i386__)
  ucontext->uc_mcontext.gregs[REG_ESP] = sp;
#elif defined(__x86_64__)
  ucontext->uc_mcontext.gregs[REG_RSP] = sp;
#else
  UNUSED(sp);
  UNUSED(ucontext);
  ASSERT_TRUE(false) << "Unsupported architecture";
#endif
}

static void SetUcontextPc(uint64_t pc, ucontext_t* ucontext) {
#if defined(__arm__)
  ucontext->uc_mcontext.arm_pc = pc;
#elif defined(__aarch64__)
  ucontext->uc_mcontext.pc = pc;
#elif defined(__i386__)
  ucontext->uc_mcontext.gregs[REG_EIP] = pc;
#elif defined(__x86_64__)
  ucontext->uc_mcontext.gregs[REG_RIP] = pc;
#else
  UNUSED(pc);
  UNUSED(ucontext);
  ASSERT_TRUE(false) << "Unsupported architecture";
#endif
}

static void SetUcontextLr(uint64_t lr, ucontext_t* ucontext) {
#if defined(__arm__)
  ucontext->uc_mcontext.arm_lr = lr;
#elif defined(__aarch64__)
  ucontext->uc_mcontext.regs[30] = lr;
#elif defined(__i386__)
  // The lr is on the stack.
  ASSERT_TRUE(lr != 0);
  ASSERT_TRUE(ucontext != nullptr);
#elif defined(__x86_64__)
  // The lr is on the stack.
  ASSERT_TRUE(lr != 0);
  ASSERT_TRUE(ucontext != nullptr);
#else
  UNUSED(lr);
  UNUSED(ucontext);
  ASSERT_TRUE(false) << "Unsupported architecture";
#endif
}
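
// The tests below verify that the unwinder refuses to read through mappings
// flagged PROT_DEVICE_MAP (backed by a device rather than a regular file).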
static constexpr size_t DEVICE_MAP_SIZE = 1024;

static void SetupDeviceMap(void** device_map) {
  // Create a device map; any unwinder reads from it should fail, even
  // though the map itself is directly readable.
  android::base::unique_fd device_fd(open("/dev/zero", O_RDONLY | O_CLOEXEC));

  *device_map = mmap(nullptr, DEVICE_MAP_SIZE, PROT_READ, MAP_PRIVATE, device_fd, 0);
  ASSERT_TRUE(*device_map != MAP_FAILED);

  // Make sure the map is readable.
  ASSERT_EQ(0, reinterpret_cast<int*>(*device_map)[0]);
}
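
// Exercise both the function-name lookup path and the unwind path when the
// pc or the sp points into a device map.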
static void UnwindFromDevice(Backtrace* backtrace, void* device_map) {
  uint64_t device_map_uint = reinterpret_cast<uint64_t>(device_map);

  backtrace_map_t map;
  backtrace->FillInMap(device_map_uint, &map);
  // Verify the flag is set.
  ASSERT_EQ(PROT_DEVICE_MAP, map.flags & PROT_DEVICE_MAP);

  // Quick sanity checks.
  uint64_t offset;
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(device_map_uint, &offset));
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(device_map_uint, &offset, &map));
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(0, &offset));

  uint64_t cur_func_offset = reinterpret_cast<uint64_t>(BacktraceTest::test_level_one_) + 1;
  // Now verify the device map flag actually causes the function name to be empty.
  backtrace->FillInMap(cur_func_offset, &map);
  ASSERT_TRUE((map.flags & PROT_DEVICE_MAP) == 0);
  ASSERT_NE(std::string(""), backtrace->GetFunctionName(cur_func_offset, &offset, &map));
  map.flags |= PROT_DEVICE_MAP;
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(cur_func_offset, &offset, &map));

  ucontext_t ucontext;

  // Create a context that has the pc in the device map, but the sp
  // in a non-device map.
  memset(&ucontext, 0, sizeof(ucontext));
  SetUcontextSp(reinterpret_cast<uint64_t>(&ucontext), &ucontext);
  SetUcontextPc(device_map_uint, &ucontext);
  SetUcontextLr(cur_func_offset, &ucontext);

  ASSERT_TRUE(backtrace->Unwind(0, &ucontext));

  // The backtrace should contain only a single frame.
  ASSERT_EQ(1U, backtrace->NumFrames());
  const backtrace_frame_data_t* frame = backtrace->GetFrame(0);
  ASSERT_EQ(device_map_uint, frame->pc);
  ASSERT_EQ(reinterpret_cast<uint64_t>(&ucontext), frame->sp);

  // Check what happens when skipping the first frame.
  ASSERT_TRUE(backtrace->Unwind(1, &ucontext));
  ASSERT_EQ(0U, backtrace->NumFrames());

  // Create a context that has the sp in the device map, but the pc
  // in a non-device map.
  memset(&ucontext, 0, sizeof(ucontext));
  SetUcontextSp(device_map_uint, &ucontext);
  SetUcontextPc(cur_func_offset, &ucontext);
  SetUcontextLr(cur_func_offset, &ucontext);

  ASSERT_TRUE(backtrace->Unwind(0, &ucontext));

  // The backtrace should contain only a single frame.
  ASSERT_EQ(1U, backtrace->NumFrames());
  frame = backtrace->GetFrame(0);
  ASSERT_EQ(cur_func_offset, frame->pc);
  ASSERT_EQ(device_map_uint, frame->sp);

  // Check what happens when skipping the first frame.
  ASSERT_TRUE(backtrace->Unwind(1, &ucontext));
  ASSERT_EQ(0U, backtrace->NumFrames());
}

TEST_F(BacktraceTest, unwind_disallow_device_map_local) {
  void* device_map;
  SetupDeviceMap(&device_map);

  // Now create an unwind object.
  std::unique_ptr<Backtrace> backtrace(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace);

  UnwindFromDevice(backtrace.get(), device_map);

  munmap(device_map, DEVICE_MAP_SIZE);
}

TEST_F(BacktraceTest, unwind_disallow_device_map_remote) {
  void* device_map;
  SetupDeviceMap(&device_map);

  // Fork a process to do a remote backtrace.
  pid_t pid;
  CreateRemoteProcess(&pid);

  // Now create an unwind object.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));

  UnwindFromDevice(backtrace.get(), device_map);

  FinishRemoteProcess(pid);

  munmap(device_map, DEVICE_MAP_SIZE);
}
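
// RAII wrapper that installs a signal handler (or an SA_SIGINFO action) on
// construction and restores the previous disposition on destruction.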
class ScopedSignalHandler {
 public:
  ScopedSignalHandler(int signal_number, void (*handler)(int)) : signal_number_(signal_number) {
    memset(&action_, 0, sizeof(action_));
    action_.sa_handler = handler;
    sigaction(signal_number_, &action_, &old_action_);
  }

  ScopedSignalHandler(int signal_number, void (*action)(int, siginfo_t*, void*))
      : signal_number_(signal_number) {
    memset(&action_, 0, sizeof(action_));
    action_.sa_flags = SA_SIGINFO;
    action_.sa_sigaction = action;
    sigaction(signal_number_, &action_, &old_action_);
  }

  ~ScopedSignalHandler() { sigaction(signal_number_, &old_action_, nullptr); }

 private:
  struct sigaction action_;
  struct sigaction old_action_;
  const int signal_number_;
};
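
// Usage sketch (illustrative only; some_handler is a placeholder, not part of
// these tests):
//   {
//     ScopedSignalHandler ssh(SIGUSR1, some_handler);
//     raise(SIGUSR1);  // Handled by some_handler.
//   }  // The previous SIGUSR1 disposition is restored here.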
static void SetValueAndLoop(void* data) {
  volatile int* value = reinterpret_cast<volatile int*>(data);

  *value = 1;
  for (volatile int i = 0;; i++)
    ;
}
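
// Fork a child that winds up spinning in SetValueAndLoop, send it SIGUSR1,
// then attach and unwind until the signal frames appear on top of the
// original call chain.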
static void UnwindThroughSignal(bool use_action, create_func_t create_func,
                                map_create_func_t map_create_func) {
  volatile int value = 0;
  pid_t pid;
  if ((pid = fork()) == 0) {
    if (use_action) {
      ScopedSignalHandler ssh(SIGUSR1, BacktraceTest::test_signal_action_);
      BacktraceTest::test_level_one_(1, 2, 3, 4, SetValueAndLoop, const_cast<int*>(&value));
    } else {
      ScopedSignalHandler ssh(SIGUSR1, BacktraceTest::test_signal_handler_);
      BacktraceTest::test_level_one_(1, 2, 3, 4, SetValueAndLoop, const_cast<int*>(&value));
    }
  }
  ASSERT_NE(-1, pid);

  int read_value = 0;
  uint64_t start = NanoTime();
  while (read_value == 0) {
    usleep(1000);

    // Loop until the remote function gets into the final function.
    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    WaitForStop(pid);

    std::unique_ptr<BacktraceMap> map(map_create_func(pid, false));
    std::unique_ptr<Backtrace> backtrace(create_func(pid, pid, map.get()));

    size_t bytes_read = backtrace->Read(reinterpret_cast<uint64_t>(const_cast<int*>(&value)),
                                        reinterpret_cast<uint8_t*>(&read_value), sizeof(read_value));
    ASSERT_EQ(sizeof(read_value), bytes_read);

    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    ASSERT_TRUE(NanoTime() - start < 5 * NS_PER_SEC)
        << "Remote process did not execute far enough in 5 seconds.";
  }

  // Now need to send a signal to the remote process.
  kill(pid, SIGUSR1);

  // Wait for the process to get to the signal handler loop.
  Backtrace::const_iterator frame_iter;
  start = NanoTime();
  std::unique_ptr<BacktraceMap> map;
  std::unique_ptr<Backtrace> backtrace;
  while (true) {
    usleep(1000);

    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    WaitForStop(pid);

    map.reset(map_create_func(pid, false));
    ASSERT_TRUE(map.get() != nullptr);
    backtrace.reset(create_func(pid, pid, map.get()));
    ASSERT_TRUE(backtrace->Unwind(0));

    bool found = false;
    for (frame_iter = backtrace->begin(); frame_iter != backtrace->end(); ++frame_iter) {
      if (frame_iter->func_name == "test_loop_forever") {
        ++frame_iter;
        found = true;
        break;
      }
    }
    if (found) {
      break;
    }

    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    ASSERT_TRUE(NanoTime() - start < 5 * NS_PER_SEC)
        << "Remote process did not get in signal handler in 5 seconds." << std::endl
        << DumpFrames(backtrace.get());
  }

  std::vector<std::string> names;
  // Loop through the frames, and save the function names.
  size_t frame = 0;
  for (; frame_iter != backtrace->end(); ++frame_iter) {
    if (frame_iter->func_name == "test_level_four") {
      frame = names.size() + 1;
    }
    names.push_back(frame_iter->func_name);
  }
  ASSERT_NE(0U, frame) << "Unable to find test_level_four in backtrace" << std::endl
                       << DumpFrames(backtrace.get());

  // The expected order of the frames:
  //   test_loop_forever
  //   test_signal_handler|test_signal_action
  //   <OPTIONAL_FRAME> May or may not exist.
  //   SetValueAndLoop (but the function name might be empty)
  //   test_level_four
  //   test_level_three
  //   test_level_two
  //   test_level_one
  // names[frame + 2] is accessed below, so the index must be strictly in range.
  ASSERT_LT(frame + 2, names.size()) << DumpFrames(backtrace.get());
  ASSERT_LE(2U, frame) << DumpFrames(backtrace.get());
  if (use_action) {
    ASSERT_EQ("test_signal_action", names[0]) << DumpFrames(backtrace.get());
  } else {
    ASSERT_EQ("test_signal_handler", names[0]) << DumpFrames(backtrace.get());
  }
  ASSERT_EQ("test_level_three", names[frame]) << DumpFrames(backtrace.get());
  ASSERT_EQ("test_level_two", names[frame + 1]) << DumpFrames(backtrace.get());
  ASSERT_EQ("test_level_one", names[frame + 2]) << DumpFrames(backtrace.get());

  FinishRemoteProcess(pid);
}

TEST_F(BacktraceTest, unwind_remote_through_signal_using_handler) {
  UnwindThroughSignal(false, Backtrace::Create, BacktraceMap::Create);
}

TEST_F(BacktraceTest, unwind_remote_through_signal_using_action) {
  UnwindThroughSignal(true, Backtrace::Create, BacktraceMap::Create);
}
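
// Even when the unwind skips the first frame, the frames that are returned
// should be renumbered starting from zero.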
static void TestFrameSkipNumbering(create_func_t create_func, map_create_func_t map_create_func) {
  std::unique_ptr<BacktraceMap> map(map_create_func(getpid(), false));
  std::unique_ptr<Backtrace> backtrace(
      create_func(getpid(), android::base::GetThreadId(), map.get()));
  backtrace->Unwind(1);
  ASSERT_NE(0U, backtrace->NumFrames());
  ASSERT_EQ(0U, backtrace->GetFrame(0)->num);
}

TEST_F(BacktraceTest, unwind_frame_skip_numbering) {
  TestFrameSkipNumbering(Backtrace::Create, BacktraceMap::Create);
}
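
// Leak check: repeatedly create, unwind, and destroy a Backtrace while
// watching the heap's allocated byte count (mallinfo().uordblks); growth
// beyond MAX_LEAK_BYTES over the run is treated as a leak.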
#define MAX_LEAK_BYTES (32 * 1024UL)

static void CheckForLeak(pid_t pid, pid_t tid) {
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(pid));

  // Loop enough that even a small leak should be detectable.
  size_t first_allocated_bytes = 0;
  size_t last_allocated_bytes = 0;
  for (size_t i = 0; i < 4096; i++) {
    Backtrace* backtrace = Backtrace::Create(pid, tid, map.get());
    ASSERT_TRUE(backtrace != nullptr);
    ASSERT_TRUE(backtrace->Unwind(0));
    VERIFY_NO_ERROR(backtrace->GetError().error_code);
    delete backtrace;

    size_t allocated_bytes = mallinfo().uordblks;
    if (first_allocated_bytes == 0) {
      first_allocated_bytes = allocated_bytes;
    } else if (last_allocated_bytes > first_allocated_bytes) {
      // Check that the memory did not increase too much over the first loop.
      ASSERT_LE(last_allocated_bytes - first_allocated_bytes, MAX_LEAK_BYTES);
    }
    last_allocated_bytes = allocated_bytes;
  }
}

TEST_F(BacktraceTest, check_for_leak_local) {
  CheckForLeak(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD);
}

TEST_F(BacktraceTest, check_for_leak_local_thread) {
  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, nullptr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  CheckForLeak(BACKTRACE_CURRENT_PROCESS, thread_data.tid);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);

  ASSERT_TRUE(pthread_join(thread, nullptr) == 0);
}

TEST_F(BacktraceTest, check_for_leak_remote) {
  pid_t pid;
  CreateRemoteProcess(&pid);

  CheckForLeak(pid, BACKTRACE_CURRENT_THREAD);

  FinishRemoteProcess(pid);
}