Allocator.cpp

/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Header page:
//
// For minimum allocation size (8 bytes), bitmap can store used allocations for
// up to 4032*8*8=258048, which is 256KiB minus the header page
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <sys/cdefs.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#include <atomic>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <mutex>

#include "android-base/macros.h"

#include "Allocator.h"
#include "LinkedList.h"
namespace android {

// runtime interfaces used:
// abort
// assert - fprintf + mmap
// mmap
// munmap
// prctl

constexpr size_t const_log2(size_t n, size_t p = 0) {
  return (n <= 1) ? p : const_log2(n / 2, p + 1);
}

constexpr unsigned int div_round_up(unsigned int x, unsigned int y) {
  return (x + y - 1) / y;
}
static constexpr size_t kPageSize = 4096;
static constexpr size_t kChunkSize = 256 * 1024;
static constexpr size_t kUsableChunkSize = kChunkSize - kPageSize;
static constexpr size_t kMaxBucketAllocationSize = kChunkSize / 4;
static constexpr size_t kMinBucketAllocationSize = 8;
static constexpr unsigned int kNumBuckets =
    const_log2(kMaxBucketAllocationSize) - const_log2(kMinBucketAllocationSize) + 1;
static constexpr unsigned int kUsablePagesPerChunk = kUsableChunkSize / kPageSize;
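// With the values above: kUsableChunkSize is 258048 bytes, kMaxBucketAllocationSize is
// 65536 bytes, kNumBuckets is 16 - 3 + 1 = 14 (bucket sizes 8, 16, ..., 65536 bytes), and
// kUsablePagesPerChunk is 63.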

std::atomic<int> heap_count;

class Chunk;

class HeapImpl {
 public:
  HeapImpl();
  ~HeapImpl();

  void* operator new(std::size_t count) noexcept;
  void operator delete(void* ptr);

  void* Alloc(size_t size);
  void Free(void* ptr);
  bool Empty();

  void MoveToFullList(Chunk* chunk, int bucket_);
  void MoveToFreeList(Chunk* chunk, int bucket_);

 private:
  DISALLOW_COPY_AND_ASSIGN(HeapImpl);

  LinkedList<Chunk*> free_chunks_[kNumBuckets];
  LinkedList<Chunk*> full_chunks_[kNumBuckets];

  void MoveToList(Chunk* chunk, LinkedList<Chunk*>* head);
  void* MapAlloc(size_t size);
  void MapFree(void* ptr);
  void* AllocLocked(size_t size);
  void FreeLocked(void* ptr);

  struct MapAllocation {
    void* ptr;
    size_t size;
    MapAllocation* next;
  };
  MapAllocation* map_allocation_list_;
  std::mutex m_;
};

// Integer log 2, rounds down
static inline unsigned int log2(size_t n) {
  return 8 * sizeof(unsigned long long) - __builtin_clzll(n) - 1;
}

static inline unsigned int size_to_bucket(size_t size) {
  if (size < kMinBucketAllocationSize) return kMinBucketAllocationSize;
  return log2(size - 1) + 1 - const_log2(kMinBucketAllocationSize);
}

static inline size_t bucket_to_size(unsigned int bucket) {
  return kMinBucketAllocationSize << bucket;
}
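
// Example: size_to_bucket(8) == 0, size_to_bucket(9) == size_to_bucket(16) == 1, and
// size_to_bucket(17) == 2, while bucket_to_size(0) == 8 and bucket_to_size(1) == 16, so a
// request is rounded up to the next power-of-two bucket of at least 8 bytes.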

static void* MapAligned(size_t size, size_t align) {
  const int prot = PROT_READ | PROT_WRITE;
  const int flags = MAP_ANONYMOUS | MAP_PRIVATE;

  size = (size + kPageSize - 1) & ~(kPageSize - 1);

  // Over-allocate enough to align
  size_t map_size = size + align - kPageSize;
  if (map_size < size) {
    return nullptr;
  }

  void* ptr = mmap(NULL, map_size, prot, flags, -1, 0);
  if (ptr == MAP_FAILED) {
    return nullptr;
  }

  size_t aligned_size = map_size;
  void* aligned_ptr = ptr;

  std::align(align, size, aligned_ptr, aligned_size);

  // Trim beginning
  if (aligned_ptr != ptr) {
    ptrdiff_t extra =
        reinterpret_cast<uintptr_t>(aligned_ptr) - reinterpret_cast<uintptr_t>(ptr);
    munmap(ptr, extra);
    map_size -= extra;
    ptr = aligned_ptr;
  }

  // Trim end
  if (map_size != size) {
    assert(map_size > size);
    assert(ptr != NULL);
    munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + size), map_size - size);
  }

#if defined(PR_SET_VMA)
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, reinterpret_cast<uintptr_t>(ptr), size,
        "leak_detector_malloc");
#endif

  return ptr;
}
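
// Example: MapAligned(kChunkSize, kChunkSize) maps 256KiB + 256KiB - 4KiB = 508KiB, finds
// the first 256KiB-aligned address inside that range, then unmaps the excess before and
// after it, leaving a single chunk-aligned, chunk-sized mapping.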

class Chunk {
 public:
  static void* operator new(std::size_t count) noexcept;
  static void operator delete(void* ptr);
  Chunk(HeapImpl* heap, int bucket);
  ~Chunk() {}

  void* Alloc();
  void Free(void* ptr);
  void Purge();
  bool Empty();

  static Chunk* ptr_to_chunk(void* ptr) {
    return reinterpret_cast<Chunk*>(reinterpret_cast<uintptr_t>(ptr) & ~(kChunkSize - 1));
  }
  static bool is_chunk(void* ptr) {
    return (reinterpret_cast<uintptr_t>(ptr) & (kChunkSize - 1)) != 0;
  }

  unsigned int free_count() { return free_count_; }
  HeapImpl* heap() { return heap_; }
  LinkedList<Chunk*> node_;  // linked list sorted by minimum free count

 private:
  DISALLOW_COPY_AND_ASSIGN(Chunk);
  HeapImpl* heap_;
  unsigned int bucket_;
  unsigned int allocation_size_;    // size of allocations in chunk, min 8 bytes
  unsigned int max_allocations_;    // maximum number of allocations in the chunk
  unsigned int first_free_bitmap_;  // index into bitmap for first non-full entry
  unsigned int free_count_;         // number of available allocations
  unsigned int frees_since_purge_;  // number of calls to Free since last Purge

  // bitmap of pages that have been dirtied
  uint32_t dirty_pages_[div_round_up(kUsablePagesPerChunk, 32)];

  // bitmap of free allocations.
  uint32_t free_bitmap_[kUsableChunkSize / kMinBucketAllocationSize / 32];

  char data_[0];

  unsigned int ptr_to_n(void* ptr) {
    ptrdiff_t offset = reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(data_);
    return offset / allocation_size_;
  }
  void* n_to_ptr(unsigned int n) { return data_ + n * allocation_size_; }
};
static_assert(sizeof(Chunk) <= kPageSize, "header must fit in page");
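
// Chunk layout: each Chunk is a kChunkSize-aligned, kChunkSize-sized mapping whose header
// (bucket metadata plus the free and dirty bitmaps) fits in the first page, as the
// static_assert above checks. Allocations handed out by Alloc() live in data_, so
// ptr_to_chunk() recovers the owning header by masking off the low bits of any pointer
// inside the chunk.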

// Override new operator on chunk to use mmap to allocate kChunkSize
void* Chunk::operator new(std::size_t count __attribute__((unused))) noexcept {
  assert(count == sizeof(Chunk));
  void* mem = MapAligned(kChunkSize, kChunkSize);
  if (!mem) {
    abort();  // throw std::bad_alloc;
  }

  return mem;
}

// Override delete operator on chunk to unmap the kChunkSize mapping
void Chunk::operator delete(void* ptr) {
  assert(reinterpret_cast<Chunk*>(ptr) == ptr_to_chunk(ptr));
  munmap(ptr, kChunkSize);
}

Chunk::Chunk(HeapImpl* heap, int bucket)
    : node_(this),
      heap_(heap),
      bucket_(bucket),
      allocation_size_(bucket_to_size(bucket)),
      max_allocations_(kUsableChunkSize / allocation_size_),
      first_free_bitmap_(0),
      free_count_(max_allocations_),
      frees_since_purge_(0) {
  memset(dirty_pages_, 0, sizeof(dirty_pages_));
  memset(free_bitmap_, 0xff, sizeof(free_bitmap_));
}

bool Chunk::Empty() {
  return free_count_ == max_allocations_;
}

void* Chunk::Alloc() {
  assert(free_count_ > 0);

  // Scan the free bitmap for the first word with a set bit, starting from the
  // cached first_free_bitmap_ index.
  unsigned int i = first_free_bitmap_;
  while (free_bitmap_[i] == 0) i++;
  assert(i < arraysize(free_bitmap_));
  unsigned int bit = __builtin_ffs(free_bitmap_[i]) - 1;
  assert(free_bitmap_[i] & (1U << bit));
  free_bitmap_[i] &= ~(1U << bit);
  unsigned int n = i * 32 + bit;
  assert(n < max_allocations_);

  // Record that the page backing this allocation has been touched.
  unsigned int page = n * allocation_size_ / kPageSize;
  assert(page / 32 < arraysize(dirty_pages_));
  dirty_pages_[page / 32] |= 1U << (page % 32);

  free_count_--;
  if (free_count_ == 0) {
    heap_->MoveToFullList(this, bucket_);
  }

  return n_to_ptr(n);
}

void Chunk::Free(void* ptr) {
  assert(is_chunk(ptr));
  assert(ptr_to_chunk(ptr) == this);

  unsigned int n = ptr_to_n(ptr);
  unsigned int i = n / 32;
  unsigned int bit = n % 32;

  assert(i < arraysize(free_bitmap_));
  assert(!(free_bitmap_[i] & (1U << bit)));

  // Mark the slot free again and keep first_free_bitmap_ pointing at the lowest
  // bitmap word that may contain a free slot.
  free_bitmap_[i] |= 1U << bit;
  free_count_++;

  if (i < first_free_bitmap_) {
    first_free_bitmap_ = i;
  }

  if (free_count_ == 1) {
    heap_->MoveToFreeList(this, bucket_);
  } else {
    // TODO(ccross): move down free list if necessary
  }

  if (frees_since_purge_++ * allocation_size_ > 16 * kPageSize) {
    Purge();
  }
}

void Chunk::Purge() {
  frees_since_purge_ = 0;

  // unsigned int allocsPerPage = kPageSize / allocation_size_;
}
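
// Purge is currently a stub. One possible direction, suggested by the commented-out line
// and by dirty_pages_ (a sketch only, not what the author implemented): for each dirty
// page whose allocations are all free in free_bitmap_, madvise(MADV_DONTNEED) the page and
// clear its dirty bit, so long-idle chunks return clean pages to the kernel.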

// Override new operator on HeapImpl to use mmap to allocate a page
void* HeapImpl::operator new(std::size_t count __attribute__((unused))) noexcept {
  assert(count == sizeof(HeapImpl));
  void* mem = MapAligned(kPageSize, kPageSize);
  if (!mem) {
    abort();  // throw std::bad_alloc;
  }

  heap_count++;
  return mem;
}

void HeapImpl::operator delete(void* ptr) {
  munmap(ptr, kPageSize);
}

HeapImpl::HeapImpl() : free_chunks_(), full_chunks_(), map_allocation_list_(NULL) {}

bool HeapImpl::Empty() {
  for (unsigned int i = 0; i < kNumBuckets; i++) {
    for (LinkedList<Chunk*>* it = free_chunks_[i].next(); it->data() != NULL; it = it->next()) {
      if (!it->data()->Empty()) {
        return false;
      }
    }
    for (LinkedList<Chunk*>* it = full_chunks_[i].next(); it->data() != NULL; it = it->next()) {
      if (!it->data()->Empty()) {
        return false;
      }
    }
  }

  return true;
}

HeapImpl::~HeapImpl() {
  for (unsigned int i = 0; i < kNumBuckets; i++) {
    while (!free_chunks_[i].empty()) {
      Chunk* chunk = free_chunks_[i].next()->data();
      chunk->node_.remove();
      delete chunk;
    }
    while (!full_chunks_[i].empty()) {
      Chunk* chunk = full_chunks_[i].next()->data();
      chunk->node_.remove();
      delete chunk;
    }
  }
}

void* HeapImpl::Alloc(size_t size) {
  std::lock_guard<std::mutex> lk(m_);
  return AllocLocked(size);
}

void* HeapImpl::AllocLocked(size_t size) {
  if (size > kMaxBucketAllocationSize) {
    return MapAlloc(size);
  }
  int bucket = size_to_bucket(size);
  if (free_chunks_[bucket].empty()) {
    Chunk* chunk = new Chunk(this, bucket);
    free_chunks_[bucket].insert(chunk->node_);
  }
  return free_chunks_[bucket].next()->data()->Alloc();
}

void HeapImpl::Free(void* ptr) {
  std::lock_guard<std::mutex> lk(m_);
  FreeLocked(ptr);
}

void HeapImpl::FreeLocked(void* ptr) {
  if (!Chunk::is_chunk(ptr)) {
    HeapImpl::MapFree(ptr);
  } else {
    Chunk* chunk = Chunk::ptr_to_chunk(ptr);
    assert(chunk->heap() == this);
    chunk->Free(ptr);
  }
}
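
// Allocations larger than kMaxBucketAllocationSize come from MapAlloc, which maps them
// aligned to kChunkSize, while bucket allocations always sit at a non-zero offset inside
// their chunk. FreeLocked relies on this: a chunk-aligned pointer cannot be a bucket
// allocation, so is_chunk() can route the free by alignment alone.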

void* HeapImpl::MapAlloc(size_t size) {
  size = (size + kPageSize - 1) & ~(kPageSize - 1);

  MapAllocation* allocation = reinterpret_cast<MapAllocation*>(AllocLocked(sizeof(MapAllocation)));
  void* ptr = MapAligned(size, kChunkSize);
  if (!ptr) {
    FreeLocked(allocation);
    abort();  // throw std::bad_alloc;
  }

  allocation->ptr = ptr;
  allocation->size = size;
  allocation->next = map_allocation_list_;
  map_allocation_list_ = allocation;

  return ptr;
}

void HeapImpl::MapFree(void* ptr) {
  MapAllocation** allocation = &map_allocation_list_;
  while (*allocation && (*allocation)->ptr != ptr) allocation = &(*allocation)->next;

  assert(*allocation != nullptr);

  MapAllocation* to_free = *allocation;
  munmap(to_free->ptr, to_free->size);
  // Unlink the bookkeeping node before handing it back to the heap.
  *allocation = to_free->next;
  FreeLocked(to_free);
}

void HeapImpl::MoveToFreeList(Chunk* chunk, int bucket) {
  MoveToList(chunk, &free_chunks_[bucket]);
}

void HeapImpl::MoveToFullList(Chunk* chunk, int bucket) {
  MoveToList(chunk, &full_chunks_[bucket]);
}

void HeapImpl::MoveToList(Chunk* chunk, LinkedList<Chunk*>* head) {
  // Remove from old list
  chunk->node_.remove();

  LinkedList<Chunk*>* node = head;
  // Insert into new list, sorted by lowest free count
  while (node->next() != head && node->data() != nullptr &&
         node->data()->free_count() < chunk->free_count())
    node = node->next();

  node->insert(chunk->node_);
}
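
// Keeping each list sorted by ascending free count means AllocLocked always draws from the
// fullest chunk that still has room, which concentrates live allocations in fewer chunks
// and (presumably) gives lightly used chunks a better chance of becoming completely empty.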

Heap::Heap() {
  // HeapImpl overloads the operator new in order to mmap itself instead of
  // allocating with new.
  // Can't use a shared_ptr to store the result because shared_ptr needs to
  // allocate, and Allocator<T> is still being constructed.
  impl_ = new HeapImpl();
  owns_impl_ = true;
}

Heap::~Heap() {
  if (owns_impl_) {
    delete impl_;
  }
}

void* Heap::allocate(size_t size) {
  return impl_->Alloc(size);
}

void Heap::deallocate(void* ptr) {
  impl_->Free(ptr);
}

void Heap::deallocate(HeapImpl* impl, void* ptr) {
  impl->Free(ptr);
}

bool Heap::empty() {
  return impl_->Empty();
}
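
// Minimal usage sketch (illustrative only; it assumes the Allocator<T> declared in
// Allocator.h is the STL-style allocator built on top of Heap):
//
//   Heap heap;                    // backing HeapImpl is mapped with MapAligned
//   void* p = heap.allocate(32);  // served from the 32-byte bucket of a Chunk
//   heap.deallocate(p);
//   // Allocator<int> alloc(heap);  // hypothetical instantiation for STL containers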

}  // namespace android