// IMemory.cpp

/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define LOG_TAG "IMemory"

#include <atomic>
#include <stdatomic.h>

#include <errno.h>   // errno reported on mmap()/fcntl() failures
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // strerror()
#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>

#include <binder/IMemory.h>
#include <binder/Parcel.h>
#include <log/log.h>
#include <utils/KeyedVector.h>
#include <utils/threads.h>

#define VERBOSE 0
namespace android {
// ---------------------------------------------------------------------------

class HeapCache : public IBinder::DeathRecipient
{
public:
    HeapCache();
    virtual ~HeapCache();

    virtual void binderDied(const wp<IBinder>& who);

    sp<IMemoryHeap> find_heap(const sp<IBinder>& binder);
    void free_heap(const sp<IBinder>& binder);
    sp<IMemoryHeap> get_heap(const sp<IBinder>& binder);
    void dump_heaps();

private:
    // For IMemory.cpp
    struct heap_info_t {
        sp<IMemoryHeap> heap;
        int32_t count;
        // Note that this cannot be meaningfully copied.
    };

    void free_heap(const wp<IBinder>& binder);

    Mutex mHeapCacheLock;  // Protects entire vector below.
    KeyedVector< wp<IBinder>, heap_info_t > mHeapCache;
    // We do not use the copy-on-write capabilities of KeyedVector.
    // TODO: Reimplement based on standard C++ container?
};
static sp<HeapCache> gHeapCache = new HeapCache();
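// HeapCache keeps one reference-counted IMemoryHeap proxy per remote heap
// binder, so a given remote heap is mapped into this process at most once and
// then shared by every BpMemoryHeap that refers to it. binderDied() forwards
// to free_heap() so a cached entry can be purged when its binder goes away.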
/******************************************************************************/

enum {
    HEAP_ID = IBinder::FIRST_CALL_TRANSACTION
};
class BpMemoryHeap : public BpInterface<IMemoryHeap>
{
public:
    explicit BpMemoryHeap(const sp<IBinder>& impl);
    virtual ~BpMemoryHeap();

    virtual int getHeapID() const;
    virtual void* getBase() const;
    virtual size_t getSize() const;
    virtual uint32_t getFlags() const;
    off_t getOffset() const override;

private:
    friend class IMemory;
    friend class HeapCache;

    // for debugging in this module
    static inline sp<IMemoryHeap> find_heap(const sp<IBinder>& binder) {
        return gHeapCache->find_heap(binder);
    }
    static inline void free_heap(const sp<IBinder>& binder) {
        gHeapCache->free_heap(binder);
    }
    static inline sp<IMemoryHeap> get_heap(const sp<IBinder>& binder) {
        return gHeapCache->get_heap(binder);
    }
    static inline void dump_heaps() {
        gHeapCache->dump_heaps();
    }

    void assertMapped() const;
    void assertReallyMapped() const;

    mutable std::atomic<int32_t> mHeapId;
    mutable void*     mBase;
    mutable size_t    mSize;
    mutable uint32_t  mFlags;
    mutable off_t     mOffset;
    mutable bool      mRealHeap;
    mutable Mutex     mLock;
};
// ----------------------------------------------------------------------------

enum {
    GET_MEMORY = IBinder::FIRST_CALL_TRANSACTION
};

class BpMemory : public BpInterface<IMemory>
{
public:
    explicit BpMemory(const sp<IBinder>& impl);
    virtual ~BpMemory();
    // NOLINTNEXTLINE(google-default-arguments)
    virtual sp<IMemoryHeap> getMemory(ssize_t* offset=nullptr, size_t* size=nullptr) const;

private:
    mutable sp<IMemoryHeap> mHeap;
    mutable ssize_t mOffset;
    mutable size_t mSize;
};
/******************************************************************************/

void* IMemory::fastPointer(const sp<IBinder>& binder, ssize_t offset) const
{
    sp<IMemoryHeap> realHeap = BpMemoryHeap::get_heap(binder);
    void* const base = realHeap->base();
    if (base == MAP_FAILED)
        return nullptr;
    return static_cast<char*>(base) + offset;
}

void* IMemory::pointer() const {
    ssize_t offset;
    sp<IMemoryHeap> heap = getMemory(&offset);
    void* const base = heap != nullptr ? heap->base() : MAP_FAILED;
    if (base == MAP_FAILED)
        return nullptr;
    return static_cast<char*>(base) + offset;
}

size_t IMemory::size() const {
    size_t size = 0;
    getMemory(nullptr, &size);
    return size;
}

ssize_t IMemory::offset() const {
    ssize_t offset = 0;
    getMemory(&offset);
    return offset;
}
/******************************************************************************/

BpMemory::BpMemory(const sp<IBinder>& impl)
    : BpInterface<IMemory>(impl), mOffset(0), mSize(0)
{
}

BpMemory::~BpMemory()
{
}

// NOLINTNEXTLINE(google-default-arguments)
sp<IMemoryHeap> BpMemory::getMemory(ssize_t* offset, size_t* size) const
{
    if (mHeap == nullptr) {
        Parcel data, reply;
        data.writeInterfaceToken(IMemory::getInterfaceDescriptor());
        if (remote()->transact(GET_MEMORY, data, &reply) == NO_ERROR) {
            sp<IBinder> heap = reply.readStrongBinder();
            if (heap != nullptr) {
                mHeap = interface_cast<IMemoryHeap>(heap);
                if (mHeap != nullptr) {
                    const int64_t offset64 = reply.readInt64();
                    const uint64_t size64 = reply.readUint64();
                    const ssize_t o = (ssize_t)offset64;
                    const size_t s = (size_t)size64;
                    size_t heapSize = mHeap->getSize();
                    if (s == size64 && o == offset64 // ILP32 bounds check
                            && s <= heapSize
                            && o >= 0
                            && (static_cast<size_t>(o) <= heapSize - s)) {
                        mOffset = o;
                        mSize = s;
                    } else {
                        // The remote sent an offset/size pair that does not
                        // fit inside its own heap; record the event and expose
                        // an empty mapping instead of trusting it.
                        android_errorWriteWithInfoLog(0x534e4554,
                            "26877992", -1, nullptr, 0);
                        mOffset = 0;
                        mSize = 0;
                    }
                }
            }
        }
    }
    if (offset) *offset = mOffset;
    if (size) *size = mSize;
    return (mSize > 0) ? mHeap : nullptr;
}
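// getMemory() caches the heap proxy plus the validated (offset, size) pair on
// first use, so later calls answer locally. The bounds check above rejects any
// reply whose range does not fit inside the reported heap, and a zero mSize
// makes getMemory() return nullptr rather than a heap with bogus bounds.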
// ---------------------------------------------------------------------------

IMPLEMENT_META_INTERFACE(Memory, "android.utils.IMemory");

BnMemory::BnMemory() {
}

BnMemory::~BnMemory() {
}

// NOLINTNEXTLINE(google-default-arguments)
status_t BnMemory::onTransact(
        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case GET_MEMORY: {
            CHECK_INTERFACE(IMemory, data, reply);
            ssize_t offset;
            size_t size;
            reply->writeStrongBinder( IInterface::asBinder(getMemory(&offset, &size)) );
            reply->writeInt64(offset);
            reply->writeUint64(size);
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}
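// Wire format for GET_MEMORY: a strong binder for the heap, then the offset as
// int64 and the size as uint64, in exactly the order BpMemory::getMemory()
// reads them back on the client side.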
/******************************************************************************/

BpMemoryHeap::BpMemoryHeap(const sp<IBinder>& impl)
    : BpInterface<IMemoryHeap>(impl),
      mHeapId(-1), mBase(MAP_FAILED), mSize(0), mFlags(0), mOffset(0), mRealHeap(false)
{
}

BpMemoryHeap::~BpMemoryHeap() {
    int32_t heapId = mHeapId.load(memory_order_relaxed);
    if (heapId != -1) {
        close(heapId);
        if (mRealHeap) {
            // by construction we're the last one
            if (mBase != MAP_FAILED) {
                sp<IBinder> binder = IInterface::asBinder(this);
                if (VERBOSE) {
                    ALOGD("UNMAPPING binder=%p, heap=%p, size=%zu, fd=%d",
                          binder.get(), this, mSize, heapId);
                }
                munmap(mBase, mSize);
            }
        } else {
            // remove from list only if it was mapped before
            sp<IBinder> binder = IInterface::asBinder(this);
            free_heap(binder);
        }
    }
}
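// Lifetime: only the "real" heap (the proxy that actually called mmap() in
// assertReallyMapped()) unmaps the region when it is destroyed. A proxy that
// merely borrowed the mapping from the cache closes its dup'ed fd and drops
// its HeapCache reference via free_heap().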
void BpMemoryHeap::assertMapped() const
{
    int32_t heapId = mHeapId.load(memory_order_acquire);
    if (heapId == -1) {
        sp<IBinder> binder(IInterface::asBinder(const_cast<BpMemoryHeap*>(this)));
        sp<BpMemoryHeap> heap(static_cast<BpMemoryHeap*>(find_heap(binder).get()));
        heap->assertReallyMapped();
        if (heap->mBase != MAP_FAILED) {
            Mutex::Autolock _l(mLock);
            if (mHeapId.load(memory_order_relaxed) == -1) {
                mBase   = heap->mBase;
                mSize   = heap->mSize;
                mOffset = heap->mOffset;
                int fd = fcntl(heap->mHeapId.load(memory_order_relaxed), F_DUPFD_CLOEXEC, 0);
                ALOGE_IF(fd == -1, "cannot dup fd=%d",
                         heap->mHeapId.load(memory_order_relaxed));
                mHeapId.store(fd, memory_order_release);
            }
        } else {
            // something went wrong
            free_heap(binder);
        }
    }
}
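// assertMapped() resolves this proxy against the HeapCache: the cached heap
// does (or already did) the real mmap(), and this object just copies its base,
// size and offset and keeps its own dup'ed file descriptor. If the mapping
// failed, the cache entry is released again.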
void BpMemoryHeap::assertReallyMapped() const
{
    int32_t heapId = mHeapId.load(memory_order_acquire);
    if (heapId == -1) {
        // remote call without mLock held: worst case scenario, we end up
        // calling transact() from multiple threads, but that's not a problem,
        // only mmap below must be in the critical section.

        Parcel data, reply;
        data.writeInterfaceToken(IMemoryHeap::getInterfaceDescriptor());
        status_t err = remote()->transact(HEAP_ID, data, &reply);
        int parcel_fd = reply.readFileDescriptor();
        const uint64_t size64 = reply.readUint64();
        const int64_t offset64 = reply.readInt64();
        const uint32_t flags = reply.readUint32();
        const size_t size = (size_t)size64;
        const off_t offset = (off_t)offset64;
        if (err != NO_ERROR || // failed transaction
                size != size64 || offset != offset64) { // ILP32 size check
            ALOGE("binder=%p transaction failed fd=%d, size=%zu, err=%d (%s)",
                  IInterface::asBinder(this).get(),
                  parcel_fd, size, err, strerror(-err));
            return;
        }

        Mutex::Autolock _l(mLock);
        if (mHeapId.load(memory_order_relaxed) == -1) {
            int fd = fcntl(parcel_fd, F_DUPFD_CLOEXEC, 0);
            ALOGE_IF(fd == -1, "cannot dup fd=%d, size=%zu, err=%d (%s)",
                     parcel_fd, size, err, strerror(errno));
            int access = PROT_READ;
            if (!(flags & READ_ONLY)) {
                access |= PROT_WRITE;
            }
            mRealHeap = true;
            mBase = mmap(nullptr, size, access, MAP_SHARED, fd, offset);
            if (mBase == MAP_FAILED) {
                ALOGE("cannot map BpMemoryHeap (binder=%p), size=%zu, fd=%d (%s)",
                      IInterface::asBinder(this).get(), size, fd, strerror(errno));
                close(fd);
            } else {
                mSize = size;
                mFlags = flags;
                mOffset = offset;
                mHeapId.store(fd, memory_order_release);
            }
        }
    }
}
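// assertReallyMapped() uses double-checked locking on mHeapId: the -1 check
// with acquire ordering is re-done under mLock with relaxed ordering, and the
// final release store of the fd publishes mBase/mSize/mFlags/mOffset to
// readers that load mHeapId with acquire semantics (see assertMapped() and
// getHeapID()).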
int BpMemoryHeap::getHeapID() const {
    assertMapped();
    // We either stored mHeapId ourselves, or loaded it with acquire semantics.
    return mHeapId.load(memory_order_relaxed);
}

void* BpMemoryHeap::getBase() const {
    assertMapped();
    return mBase;
}

size_t BpMemoryHeap::getSize() const {
    assertMapped();
    return mSize;
}

uint32_t BpMemoryHeap::getFlags() const {
    assertMapped();
    return mFlags;
}

off_t BpMemoryHeap::getOffset() const {
    assertMapped();
    return mOffset;
}
// ---------------------------------------------------------------------------

IMPLEMENT_META_INTERFACE(MemoryHeap, "android.utils.IMemoryHeap");

BnMemoryHeap::BnMemoryHeap() {
}

BnMemoryHeap::~BnMemoryHeap() {
}

// NOLINTNEXTLINE(google-default-arguments)
status_t BnMemoryHeap::onTransact(
        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case HEAP_ID: {
            CHECK_INTERFACE(IMemoryHeap, data, reply);
            reply->writeFileDescriptor(getHeapID());
            reply->writeUint64(getSize());
            reply->writeInt64(getOffset());
            reply->writeUint32(getFlags());
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}
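// A minimal serving-side sketch (not part of this file), assuming the
// MemoryHeapBase / MemoryBase helpers that libbinder layers on top of
// BnMemoryHeap / BnMemory; names and sizes below are illustrative.
//
//     #include <binder/MemoryHeapBase.h>
//     #include <binder/MemoryBase.h>
//
//     sp<IMemory> makeSharedBuffer(size_t size) {
//         sp<MemoryHeapBase> heap = new MemoryHeapBase(size, 0, "shared-buffer");
//         // Expose the whole heap as one IMemory; the (heap, offset, size)
//         // triple is what travels through GET_MEMORY / HEAP_ID above.
//         return new MemoryBase(heap, 0, size);
//     }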
/*****************************************************************************/

HeapCache::HeapCache()
    : DeathRecipient()
{
}

HeapCache::~HeapCache()
{
}

void HeapCache::binderDied(const wp<IBinder>& binder)
{
    //ALOGD("binderDied binder=%p", binder.unsafe_get());
    free_heap(binder);
}

sp<IMemoryHeap> HeapCache::find_heap(const sp<IBinder>& binder)
{
    Mutex::Autolock _l(mHeapCacheLock);
    ssize_t i = mHeapCache.indexOfKey(binder);
    if (i >= 0) {
        heap_info_t& info = mHeapCache.editValueAt(i);
        ALOGD_IF(VERBOSE,
                 "found binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
                 binder.get(), info.heap.get(),
                 static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
                 static_cast<BpMemoryHeap*>(info.heap.get())
                     ->mHeapId.load(memory_order_relaxed),
                 info.count);
        ++info.count;
        return info.heap;
    } else {
        heap_info_t info;
        info.heap = interface_cast<IMemoryHeap>(binder);
        info.count = 1;
        //ALOGD("adding binder=%p, heap=%p, count=%d",
        //        binder.get(), info.heap.get(), info.count);
        mHeapCache.add(binder, info);
        return info.heap;
    }
}
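// Entries are reference counted: every find_heap() for a binder bumps count,
// and free_heap() below drops it, removing the entry (and letting the real
// heap proxy unmap) once the last user is gone.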
void HeapCache::free_heap(const sp<IBinder>& binder) {
    free_heap( wp<IBinder>(binder) );
}

void HeapCache::free_heap(const wp<IBinder>& binder)
{
    sp<IMemoryHeap> rel;
    {
        Mutex::Autolock _l(mHeapCacheLock);
        ssize_t i = mHeapCache.indexOfKey(binder);
        if (i >= 0) {
            heap_info_t& info(mHeapCache.editValueAt(i));
            if (--info.count == 0) {
                ALOGD_IF(VERBOSE,
                         "removing binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
                         binder.unsafe_get(), info.heap.get(),
                         static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
                         static_cast<BpMemoryHeap*>(info.heap.get())
                             ->mHeapId.load(memory_order_relaxed),
                         info.count);
                // Keep the heap alive in `rel` so its destructor (and any
                // munmap) runs after mHeapCacheLock is released.
                rel = mHeapCache.valueAt(i).heap;
                mHeapCache.removeItemsAt(i);
            }
        } else {
            ALOGE("free_heap binder=%p not found!!!", binder.unsafe_get());
        }
    }
}
sp<IMemoryHeap> HeapCache::get_heap(const sp<IBinder>& binder)
{
    sp<IMemoryHeap> realHeap;
    Mutex::Autolock _l(mHeapCacheLock);
    ssize_t i = mHeapCache.indexOfKey(binder);
    if (i >= 0)
        realHeap = mHeapCache.valueAt(i).heap;
    else
        realHeap = interface_cast<IMemoryHeap>(binder);
    return realHeap;
}

void HeapCache::dump_heaps()
{
    Mutex::Autolock _l(mHeapCacheLock);
    int c = mHeapCache.size();
    for (int i = 0; i < c; i++) {
        const heap_info_t& info = mHeapCache.valueAt(i);
        BpMemoryHeap const* h(static_cast<BpMemoryHeap const*>(info.heap.get()));
        ALOGD("key=%p, heap=%p, count=%d, (fd=%d, base=%p, size=%zu)",
              mHeapCache.keyAt(i).unsafe_get(),
              info.heap.get(), info.count,
              h->mHeapId.load(memory_order_relaxed), h->mBase, h->mSize);
    }
}
// ---------------------------------------------------------------------------
} // namespace android