// RefBase_test.cpp
  1. /*
  2. * Copyright (C) 2016 The Android Open Source Project
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <gtest/gtest.h>
  17. #include <utils/StrongPointer.h>
  18. #include <utils/RefBase.h>
  19. #include <thread>
  20. #include <atomic>
  21. #include <sched.h>
  22. #include <errno.h>
// Enhanced version of StrongPointer_test, but using RefBase underneath.
using namespace android;

// Iteration count for the racing tests below: large enough that a lost
// update or missed deletion is overwhelmingly likely to be observed.
static constexpr int NITERS = 1000000;

// Sentinel strong count of a freshly constructed RefBase object before the
// first strong reference is taken.
static constexpr int INITIAL_STRONG_VALUE = 1 << 28; // Mirroring RefBase definition.
  27. class Foo : public RefBase {
  28. public:
  29. Foo(bool* deleted_check) : mDeleted(deleted_check) {
  30. *mDeleted = false;
  31. }
  32. ~Foo() {
  33. *mDeleted = true;
  34. }
  35. private:
  36. bool* mDeleted;
  37. };
  38. // A version of Foo that ensures that all objects are allocated at the same
  39. // address. No more than one can be allocated at a time. Thread-hostile.
  40. class FooFixedAlloc : public RefBase {
  41. public:
  42. static void* operator new(size_t size) {
  43. if (mAllocCount != 0) {
  44. abort();
  45. }
  46. mAllocCount = 1;
  47. if (theMemory == nullptr) {
  48. theMemory = malloc(size);
  49. }
  50. return theMemory;
  51. }
  52. static void operator delete(void *p) {
  53. if (mAllocCount != 1 || p != theMemory) {
  54. abort();
  55. }
  56. mAllocCount = 0;
  57. }
  58. FooFixedAlloc(bool* deleted_check) : mDeleted(deleted_check) {
  59. *mDeleted = false;
  60. }
  61. ~FooFixedAlloc() {
  62. *mDeleted = true;
  63. }
  64. private:
  65. bool* mDeleted;
  66. static int mAllocCount;
  67. static void* theMemory;
  68. };
  69. int FooFixedAlloc::mAllocCount(0);
  70. void* FooFixedAlloc::theMemory(nullptr);
// Verifies that moving an sp transfers ownership without touching the
// reference counts, and that destruction still happens exactly on time.
TEST(RefBase, StrongMoves) {
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);
    // A bare RefBase object starts at the sentinel strong count, not 0.
    ASSERT_EQ(INITIAL_STRONG_VALUE, foo->getStrongCount());
    ASSERT_FALSE(isDeleted) << "Already deleted...?";
    sp<Foo> sp1(foo);
    wp<Foo> wp1(sp1);
    ASSERT_EQ(1, foo->getStrongCount());
    // Weak count includes both strong and weak references.
    ASSERT_EQ(2, foo->getWeakRefs()->getWeakCount());
    {
        // A move must not bump the refcount the way a copy would.
        sp<Foo> sp2 = std::move(sp1);
        ASSERT_EQ(1, foo->getStrongCount())
                << "std::move failed, incremented refcnt";
        ASSERT_EQ(nullptr, sp1.get()) << "std::move failed, sp1 is still valid";
        // The strong count isn't increasing, let's double check the old object
        // is properly reset and doesn't early delete
        sp1 = std::move(sp2);
    }
    ASSERT_FALSE(isDeleted) << "deleted too early! still has a reference!";
    {
        // Now let's double check it deletes on time
        sp<Foo> sp2 = std::move(sp1);
    }
    ASSERT_TRUE(isDeleted) << "foo was leaked!";
    // With the last strong reference gone, promotion must fail.
    ASSERT_TRUE(wp1.promote().get() == nullptr);
}
// Verifies weak-count bookkeeping for wp construction, copy, and reset.
TEST(RefBase, WeakCopies) {
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);
    // No references of any kind have been taken yet.
    EXPECT_EQ(0, foo->getWeakRefs()->getWeakCount());
    ASSERT_FALSE(isDeleted) << "Foo (weak) already deleted...?";
    wp<Foo> wp1(foo);
    EXPECT_EQ(1, foo->getWeakRefs()->getWeakCount());
    {
        // Copying a wp bumps the weak count ...
        wp<Foo> wp2 = wp1;
        ASSERT_EQ(2, foo->getWeakRefs()->getWeakCount());
    }
    // ... and destroying the copy drops it again.
    EXPECT_EQ(1, foo->getWeakRefs()->getWeakCount());
    ASSERT_FALSE(isDeleted) << "deleted too early! still has a reference!";
    wp1 = nullptr;
    // NOTE(review): per the assertion message, dropping the last weak
    // reference no longer deletes a never-promoted object, so this test
    // intentionally leaves foo allocated.
    ASSERT_FALSE(isDeleted) << "Deletion on wp destruction should no longer occur";
}
// Exercises the full comparison-operator matrix on sp and wp: equality,
// ordering, mixed sp/wp comparisons, and behavior after clear()/death.
TEST(RefBase, Comparisons) {
    bool isDeleted, isDeleted2, isDeleted3;
    Foo* foo = new Foo(&isDeleted);
    Foo* foo2 = new Foo(&isDeleted2);
    sp<Foo> sp1(foo);
    sp<Foo> sp2(foo2);
    wp<Foo> wp1(sp1);
    wp<Foo> wp2(sp1);
    wp<Foo> wp3(sp2);
    // Two wps to the same object compare equal, and equal to any sp to it.
    ASSERT_TRUE(wp1 == wp2);
    ASSERT_TRUE(wp1 == sp1);
    ASSERT_TRUE(wp3 == sp2);
    ASSERT_TRUE(wp1 != sp2);
    ASSERT_TRUE(wp1 <= wp2);
    ASSERT_TRUE(wp1 >= wp2);
    ASSERT_FALSE(wp1 != wp2);
    ASSERT_FALSE(wp1 > wp2);
    ASSERT_FALSE(wp1 < wp2);
    ASSERT_FALSE(sp1 == sp2);
    ASSERT_TRUE(sp1 != sp2);
    // Pick whichever object ordered lower so the strict-ordering checks
    // below are independent of allocation addresses.
    bool sp1_smaller = sp1 < sp2;
    wp<Foo> wp_smaller = sp1_smaller ? wp1 : wp3;
    wp<Foo> wp_larger = sp1_smaller ? wp3 : wp1;
    ASSERT_TRUE(wp_smaller < wp_larger);
    ASSERT_TRUE(wp_smaller != wp_larger);
    ASSERT_TRUE(wp_smaller <= wp_larger);
    ASSERT_FALSE(wp_smaller == wp_larger);
    ASSERT_FALSE(wp_smaller > wp_larger);
    ASSERT_FALSE(wp_smaller >= wp_larger);
    // Kill the second object; sp comparisons see the null, ...
    sp2 = nullptr;
    ASSERT_TRUE(isDeleted2);
    ASSERT_FALSE(isDeleted);
    ASSERT_FALSE(wp3 == sp2);
    // Comparison results on weak pointers should not be affected.
    ASSERT_TRUE(wp_smaller < wp_larger);
    ASSERT_TRUE(wp_smaller != wp_larger);
    ASSERT_TRUE(wp_smaller <= wp_larger);
    ASSERT_FALSE(wp_smaller == wp_larger);
    ASSERT_FALSE(wp_smaller > wp_larger);
    ASSERT_FALSE(wp_smaller >= wp_larger);
    // Null wp vs live wp, then null vs null after both are cleared.
    wp2 = nullptr;
    ASSERT_FALSE(wp1 == wp2);
    ASSERT_TRUE(wp1 != wp2);
    wp1.clear();
    ASSERT_TRUE(wp1 == wp2);
    ASSERT_FALSE(wp1 != wp2);
    wp3.clear();
    ASSERT_TRUE(wp1 == wp3);
    ASSERT_FALSE(wp1 != wp3);
    ASSERT_FALSE(isDeleted);
    sp1.clear();
    ASSERT_TRUE(isDeleted);
    ASSERT_TRUE(sp1 == sp2);
    // Try to check that null pointers are properly initialized.
    {
        // Try once with non-null, to maximize chances of getting junk on the
        // stack.
        sp<Foo> sp3(new Foo(&isDeleted3));
        wp<Foo> wp4(sp3);
        wp<Foo> wp5;
        ASSERT_FALSE(wp4 == wp5);
        ASSERT_TRUE(wp4 != wp5);
        ASSERT_FALSE(sp3 == wp5);
        ASSERT_FALSE(wp5 == sp3);
        ASSERT_TRUE(sp3 != wp5);
        ASSERT_TRUE(wp5 != sp3);
        ASSERT_TRUE(sp3 == wp4);
    }
    {
        // Same checks again, with everything default-constructed (null).
        sp<Foo> sp3;
        wp<Foo> wp4(sp3);
        wp<Foo> wp5;
        ASSERT_TRUE(wp4 == wp5);
        ASSERT_FALSE(wp4 != wp5);
        ASSERT_TRUE(sp3 == wp5);
        ASSERT_TRUE(wp5 == sp3);
        ASSERT_FALSE(sp3 != wp5);
        ASSERT_FALSE(wp5 != sp3);
        ASSERT_TRUE(sp3 == wp4);
    }
}
// Check whether comparison against dead wp works, even if the object referenced
// by the new wp happens to be at the same address.
TEST(RefBase, ReplacedComparison) {
    bool isDeleted, isDeleted2;
    FooFixedAlloc* foo = new FooFixedAlloc(&isDeleted);
    sp<FooFixedAlloc> sp1(foo);
    wp<FooFixedAlloc> wp1(sp1);
    ASSERT_TRUE(wp1 == sp1);
    sp1.clear();  // Deallocates the object.
    ASSERT_TRUE(isDeleted);
    FooFixedAlloc* foo2 = new FooFixedAlloc(&isDeleted2);
    ASSERT_FALSE(isDeleted2);
    // FooFixedAlloc reuses a single buffer, so the addresses must match.
    ASSERT_EQ(foo, foo2);  // Not technically a legal comparison, but ...
    sp<FooFixedAlloc> sp2(foo2);
    wp<FooFixedAlloc> wp2(sp2);
    ASSERT_TRUE(sp2 == wp2);
    ASSERT_FALSE(sp2 != wp2);
    // wp1 still refers to the dead first object: the identical address must
    // not make it compare equal to references to the replacement.
    ASSERT_TRUE(sp2 != wp1);
    ASSERT_FALSE(sp2 == wp1);
    ASSERT_FALSE(sp2 == sp1);  // sp1 is null.
    ASSERT_FALSE(wp1 == wp2);  // wp1 refers to old object.
    ASSERT_TRUE(wp1 != wp2);
    // The two distinct wps must still be strictly ordered, one way or the other.
    ASSERT_TRUE(wp1 > wp2 || wp1 < wp2);
    ASSERT_TRUE(wp1 >= wp2 || wp1 <= wp2);
    ASSERT_FALSE(wp1 >= wp2 && wp1 <= wp2);
    ASSERT_FALSE(wp1 == nullptr);
    // Reassigning wp1 to the live object restores equality.
    wp1 = sp2;
    ASSERT_TRUE(wp1 == wp2);
    ASSERT_FALSE(wp1 != wp2);
}
// Set up a situation in which we race with visit2AndRemove() to delete
// 2 strong references. Bar destructor checks that there are no early
// deletions and prior updates are visible to destructor.
class Bar : public RefBase {
public:
    // delete_count must outlive this object; it is bumped once on destruction.
    Bar(std::atomic<int>* delete_count) : mVisited1(false), mVisited2(false),
            mDeleteCount(delete_count) {
    }

    ~Bar() {
        // Both flags must have been set before the last strong reference was
        // dropped; the flags are plain bools, so their visibility here is
        // exactly what the racing tests are checking.
        EXPECT_TRUE(mVisited1);
        EXPECT_TRUE(mVisited2);
        (*mDeleteCount)++;
    }

    bool mVisited1;  // Set by the main test thread.
    bool mVisited2;  // Set by the helper thread.
private:
    std::atomic<int>* mDeleteCount;  // Not owned; counts destructions.
};
  243. static sp<Bar> buffer;
  244. static std::atomic<bool> bufferFull(false);
  245. // Wait until bufferFull has value val.
  246. static inline void waitFor(bool val) {
  247. while (bufferFull != val) {}
  248. }
  249. cpu_set_t otherCpus;
  250. // Divide the cpus we're allowed to run on into myCpus and otherCpus.
  251. // Set origCpus to the processors we were originally allowed to run on.
  252. // Return false if origCpus doesn't include at least processors 0 and 1.
  253. static bool setExclusiveCpus(cpu_set_t* origCpus /* out */,
  254. cpu_set_t* myCpus /* out */, cpu_set_t* otherCpus) {
  255. if (sched_getaffinity(0, sizeof(cpu_set_t), origCpus) != 0) {
  256. return false;
  257. }
  258. if (!CPU_ISSET(0, origCpus) || !CPU_ISSET(1, origCpus)) {
  259. return false;
  260. }
  261. CPU_ZERO(myCpus);
  262. CPU_ZERO(otherCpus);
  263. CPU_OR(myCpus, myCpus, origCpus);
  264. CPU_OR(otherCpus, otherCpus, origCpus);
  265. for (unsigned i = 0; i < CPU_SETSIZE; ++i) {
  266. // I get the even cores, the other thread gets the odd ones.
  267. if (i & 1) {
  268. CPU_CLR(i, myCpus);
  269. } else {
  270. CPU_CLR(i, otherCpus);
  271. }
  272. }
  273. return true;
  274. }
// Helper-thread body for RacingDestructors: repeatedly consume the shared
// buffer, mark the object visited, and drop what may be its last strong
// reference, racing with the producer doing the same.
static void visit2AndRemove() {
    // Pin this thread to the CPUs the main test thread is not using.
    if (sched_setaffinity(0, sizeof(cpu_set_t), &otherCpus) != 0) {
        FAIL() << "setaffinity returned:" << errno;
    }
    for (int i = 0; i < NITERS; ++i) {
        waitFor(true);  // Wait for the producer to publish a Bar.
        buffer->mVisited2 = true;
        buffer = nullptr;    // May run ~Bar, racing with the producer's release.
        bufferFull = false;  // Hand the slot back to the producer.
    }
}
// Stress test: two threads race to drop the last strong reference to the
// same object; ~Bar verifies no early deletion and deleteCount verifies no
// missed deletion.
TEST(RefBase, RacingDestructors) {
    cpu_set_t origCpus;
    cpu_set_t myCpus;
    // Restrict us and the helper thread to disjoint cpu sets.
    // This prevents us from getting scheduled against each other,
    // which would be atrociously slow.
    if (setExclusiveCpus(&origCpus, &myCpus, &otherCpus)) {
        std::thread t(visit2AndRemove);
        std::atomic<int> deleteCount(0);
        if (sched_setaffinity(0, sizeof(cpu_set_t), &myCpus) != 0) {
            FAIL() << "setaffinity returned:" << errno;
        }
        for (int i = 0; i < NITERS; ++i) {
            waitFor(false);  // Wait for the helper to empty the buffer slot.
            Bar* bar = new Bar(&deleteCount);
            sp<Bar> sp3(bar);
            buffer = sp3;       // Publish a second strong reference.
            bufferFull = true;  // Let the helper start racing with us.
            ASSERT_TRUE(bar->getStrongCount() >= 1);
            // Weak count includes strong count.
            ASSERT_TRUE(bar->getWeakRefs()->getWeakCount() >= 1);
            sp3->mVisited1 = true;
            sp3 = nullptr;  // Race the helper for the final release.
        }
        t.join();
        // Restore the affinity we started with.
        if (sched_setaffinity(0, sizeof(cpu_set_t), &origCpus) != 0) {
            FAIL();
        }
        // Every Bar must have been destroyed exactly once.
        ASSERT_EQ(NITERS, deleteCount) << "Deletions missed!";
    } // Otherwise this is slow and probably pointless on a uniprocessor.
}
  317. static wp<Bar> wpBuffer;
  318. static std::atomic<bool> wpBufferFull(false);
  319. // Wait until wpBufferFull has value val.
  320. static inline void wpWaitFor(bool val) {
  321. while (wpBufferFull != val) {}
  322. }
// Helper-thread body for RacingPromotions: promote the published weak
// pointer, mark the object visited, then drop both references.
static void visit3AndRemove() {
    // Pin this thread to the CPUs the main test thread is not using.
    if (sched_setaffinity(0, sizeof(cpu_set_t), &otherCpus) != 0) {
        FAIL() << "setaffinity returned:" << errno;
    }
    for (int i = 0; i < NITERS; ++i) {
        wpWaitFor(true);  // Wait for the producer to publish a wp.
        {
            sp<Bar> sp1 = wpBuffer.promote();
            // We implicitly check that sp1 != NULL
            sp1->mVisited2 = true;
        }  // Strong reference from the promotion dropped here.
        wpBuffer = nullptr;
        wpBufferFull = false;  // Hand the slot back to the producer.
    }
}
  338. TEST(RefBase, RacingPromotions) {
  339. cpu_set_t origCpus;
  340. cpu_set_t myCpus;
  341. // Restrict us and the helper thread to disjoint cpu sets.
  342. // This prevents us from getting scheduled against each other,
  343. // which would be atrociously slow.
  344. if (setExclusiveCpus(&origCpus, &myCpus, &otherCpus)) {
  345. std::thread t(visit3AndRemove);
  346. std::atomic<int> deleteCount(0);
  347. if (sched_setaffinity(0, sizeof(cpu_set_t), &myCpus) != 0) {
  348. FAIL() << "setaffinity returned:" << errno;
  349. }
  350. for (int i = 0; i < NITERS; ++i) {
  351. Bar* bar = new Bar(&deleteCount);
  352. wp<Bar> wp1(bar);
  353. bar->mVisited1 = true;
  354. if (i % (NITERS / 10) == 0) {
  355. // Do this rarely, since it generates a log message.
  356. wp1 = nullptr; // No longer destroys the object.
  357. wp1 = bar;
  358. }
  359. wpBuffer = wp1;
  360. ASSERT_EQ(bar->getWeakRefs()->getWeakCount(), 2);
  361. wpBufferFull = true;
  362. // Promotion races with that in visit3AndRemove.
  363. // This may or may not succeed, but it shouldn't interfere with
  364. // the concurrent one.
  365. sp<Bar> sp1 = wp1.promote();
  366. wpWaitFor(false); // Waits for other thread to drop strong pointer.
  367. sp1 = nullptr;
  368. // No strong pointers here.
  369. sp1 = wp1.promote();
  370. ASSERT_EQ(sp1.get(), nullptr) << "Dead wp promotion succeeded!";
  371. }
  372. t.join();
  373. if (sched_setaffinity(0, sizeof(cpu_set_t), &origCpus) != 0) {
  374. FAIL();
  375. }
  376. ASSERT_EQ(NITERS, deleteCount) << "Deletions missed!";
  377. } // Otherwise this is slow and probably pointless on a uniprocessor.
  378. }