BufferQueueProducer.cpp

  1. /*
  2. * Copyright 2014 The Android Open Source Project
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <inttypes.h>
  17. #define LOG_TAG "BufferQueueProducer"
  18. #define ATRACE_TAG ATRACE_TAG_GRAPHICS
  19. //#define LOG_NDEBUG 0
  20. #if DEBUG_ONLY_CODE
  21. #define VALIDATE_CONSISTENCY() do { mCore->validateConsistencyLocked(); } while (0)
  22. #else
  23. #define VALIDATE_CONSISTENCY()
  24. #endif
  25. #define EGL_EGLEXT_PROTOTYPES
  26. #include <binder/IPCThreadState.h>
  27. #include <gui/BufferItem.h>
  28. #include <gui/BufferQueueCore.h>
  29. #include <gui/BufferQueueProducer.h>
  30. #include <gui/GLConsumer.h>
  31. #include <gui/IConsumerListener.h>
  32. #include <gui/IProducerListener.h>
  33. #include <private/gui/BufferQueueThreadState.h>
  34. #include <utils/Log.h>
  35. #include <utils/Trace.h>
  36. #include <system/window.h>
  37. namespace android {
  38. static constexpr uint32_t BQ_LAYER_COUNT = 1;
  39. BufferQueueProducer::BufferQueueProducer(const sp<BufferQueueCore>& core,
  40. bool consumerIsSurfaceFlinger) :
  41. mCore(core),
  42. mSlots(core->mSlots),
  43. mConsumerName(),
  44. mStickyTransform(0),
  45. mConsumerIsSurfaceFlinger(consumerIsSurfaceFlinger),
  46. mLastQueueBufferFence(Fence::NO_FENCE),
  47. mLastQueuedTransform(0),
  48. mCallbackMutex(),
  49. mNextCallbackTicket(0),
  50. mCurrentCallbackTicket(0),
  51. mCallbackCondition(),
  52. mDequeueTimeout(-1),
  53. mDequeueWaitingForAllocation(false) {}
  54. BufferQueueProducer::~BufferQueueProducer() {}
  55. status_t BufferQueueProducer::requestBuffer(int slot, sp<GraphicBuffer>* buf) {
  56. ATRACE_CALL();
  57. BQ_LOGV("requestBuffer: slot %d", slot);
  58. std::lock_guard<std::mutex> lock(mCore->mMutex);
  59. if (mCore->mIsAbandoned) {
  60. BQ_LOGE("requestBuffer: BufferQueue has been abandoned");
  61. return NO_INIT;
  62. }
  63. if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
  64. BQ_LOGE("requestBuffer: BufferQueue has no connected producer");
  65. return NO_INIT;
  66. }
  67. if (slot < 0 || slot >= BufferQueueDefs::NUM_BUFFER_SLOTS) {
  68. BQ_LOGE("requestBuffer: slot index %d out of range [0, %d)",
  69. slot, BufferQueueDefs::NUM_BUFFER_SLOTS);
  70. return BAD_VALUE;
  71. } else if (!mSlots[slot].mBufferState.isDequeued()) {
  72. BQ_LOGE("requestBuffer: slot %d is not owned by the producer "
  73. "(state = %s)", slot, mSlots[slot].mBufferState.string());
  74. return BAD_VALUE;
  75. }
  76. mSlots[slot].mRequestBufferCalled = true;
  77. *buf = mSlots[slot].mGraphicBuffer;
  78. return NO_ERROR;
  79. }
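/*
 * requestBuffer() only hands out the GraphicBuffer for a slot the caller
 * already owns. A minimal sketch of the usual pairing (the "producer" handle
 * and the surrounding error handling are illustrative):
 *
 *   int slot = -1;
 *   sp<Fence> fence;
 *   status_t result = producer->dequeueBuffer(&slot, &fence, width, height,
 *           format, usage, nullptr, nullptr);
 *   if (result < 0) return result;
 *   if (result & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) {
 *       sp<GraphicBuffer> buffer;
 *       producer->requestBuffer(slot, &buffer);  // re-fetch the new buffer
 *   }
 */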
  80. status_t BufferQueueProducer::setMaxDequeuedBufferCount(
  81. int maxDequeuedBuffers) {
  82. ATRACE_CALL();
  83. BQ_LOGV("setMaxDequeuedBufferCount: maxDequeuedBuffers = %d",
  84. maxDequeuedBuffers);
  85. sp<IConsumerListener> listener;
  86. { // Autolock scope
  87. std::unique_lock<std::mutex> lock(mCore->mMutex);
  88. mCore->waitWhileAllocatingLocked(lock);
  89. if (mCore->mIsAbandoned) {
  90. BQ_LOGE("setMaxDequeuedBufferCount: BufferQueue has been "
  91. "abandoned");
  92. return NO_INIT;
  93. }
  94. if (maxDequeuedBuffers == mCore->mMaxDequeuedBufferCount) {
  95. return NO_ERROR;
  96. }
  97. // The new maxDequeuedBuffer count should not be violated by the number
  98. // of currently dequeued buffers
  99. int dequeuedCount = 0;
  100. for (int s : mCore->mActiveBuffers) {
  101. if (mSlots[s].mBufferState.isDequeued()) {
  102. dequeuedCount++;
  103. }
  104. }
  105. if (dequeuedCount > maxDequeuedBuffers) {
106. BQ_LOGE("setMaxDequeuedBufferCount: the requested maxDequeuedBuffer "
107. "count (%d) is smaller than the current dequeued buffer count (%d)",
  108. maxDequeuedBuffers, dequeuedCount);
  109. return BAD_VALUE;
  110. }
  111. int bufferCount = mCore->getMinUndequeuedBufferCountLocked();
  112. bufferCount += maxDequeuedBuffers;
  113. if (bufferCount > BufferQueueDefs::NUM_BUFFER_SLOTS) {
  114. BQ_LOGE("setMaxDequeuedBufferCount: bufferCount %d too large "
  115. "(max %d)", bufferCount, BufferQueueDefs::NUM_BUFFER_SLOTS);
  116. return BAD_VALUE;
  117. }
  118. const int minBufferSlots = mCore->getMinMaxBufferCountLocked();
  119. if (bufferCount < minBufferSlots) {
  120. BQ_LOGE("setMaxDequeuedBufferCount: requested buffer count %d is "
  121. "less than minimum %d", bufferCount, minBufferSlots);
  122. return BAD_VALUE;
  123. }
  124. if (bufferCount > mCore->mMaxBufferCount) {
  125. BQ_LOGE("setMaxDequeuedBufferCount: %d dequeued buffers would "
  126. "exceed the maxBufferCount (%d) (maxAcquired %d async %d "
  127. "mDequeuedBufferCannotBlock %d)", maxDequeuedBuffers,
  128. mCore->mMaxBufferCount, mCore->mMaxAcquiredBufferCount,
  129. mCore->mAsyncMode, mCore->mDequeueBufferCannotBlock);
  130. return BAD_VALUE;
  131. }
  132. int delta = maxDequeuedBuffers - mCore->mMaxDequeuedBufferCount;
  133. if (!mCore->adjustAvailableSlotsLocked(delta)) {
  134. return BAD_VALUE;
  135. }
  136. mCore->mMaxDequeuedBufferCount = maxDequeuedBuffers;
  137. VALIDATE_CONSISTENCY();
  138. if (delta < 0) {
  139. listener = mCore->mConsumerListener;
  140. }
  141. mCore->mDequeueCondition.notify_all();
  142. } // Autolock scope
  143. // Call back without lock held
  144. if (listener != nullptr) {
  145. listener->onBuffersReleased();
  146. }
  147. return NO_ERROR;
  148. }
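/*
 * Worked example of the slot arithmetic above (numbers are illustrative):
 * with maxAcquiredBufferCount = 1 and async mode off, the minimum undequeued
 * count is 1, so a client that wants triple buffering calls
 * setMaxDequeuedBufferCount(2) and the queue reserves 1 + 2 = 3 slots, which
 * must stay within both NUM_BUFFER_SLOTS and mMaxBufferCount.
 */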
  149. status_t BufferQueueProducer::setAsyncMode(bool async) {
  150. ATRACE_CALL();
  151. BQ_LOGV("setAsyncMode: async = %d", async);
  152. sp<IConsumerListener> listener;
  153. { // Autolock scope
  154. std::unique_lock<std::mutex> lock(mCore->mMutex);
  155. mCore->waitWhileAllocatingLocked(lock);
  156. if (mCore->mIsAbandoned) {
  157. BQ_LOGE("setAsyncMode: BufferQueue has been abandoned");
  158. return NO_INIT;
  159. }
  160. if (async == mCore->mAsyncMode) {
  161. return NO_ERROR;
  162. }
  163. if ((mCore->mMaxAcquiredBufferCount + mCore->mMaxDequeuedBufferCount +
  164. (async || mCore->mDequeueBufferCannotBlock ? 1 : 0)) >
  165. mCore->mMaxBufferCount) {
  166. BQ_LOGE("setAsyncMode(%d): this call would cause the "
  167. "maxBufferCount (%d) to be exceeded (maxAcquired %d "
  168. "maxDequeued %d mDequeueBufferCannotBlock %d)", async,
  169. mCore->mMaxBufferCount, mCore->mMaxAcquiredBufferCount,
  170. mCore->mMaxDequeuedBufferCount,
  171. mCore->mDequeueBufferCannotBlock);
  172. return BAD_VALUE;
  173. }
  174. int delta = mCore->getMaxBufferCountLocked(async,
  175. mCore->mDequeueBufferCannotBlock, mCore->mMaxBufferCount)
  176. - mCore->getMaxBufferCountLocked();
  177. if (!mCore->adjustAvailableSlotsLocked(delta)) {
  178. BQ_LOGE("setAsyncMode: BufferQueue failed to adjust the number of "
  179. "available slots. Delta = %d", delta);
  180. return BAD_VALUE;
  181. }
  182. mCore->mAsyncMode = async;
  183. VALIDATE_CONSISTENCY();
  184. mCore->mDequeueCondition.notify_all();
  185. if (delta < 0) {
  186. listener = mCore->mConsumerListener;
  187. }
  188. } // Autolock scope
  189. // Call back without lock held
  190. if (listener != nullptr) {
  191. listener->onBuffersReleased();
  192. }
  193. return NO_ERROR;
  194. }
  195. int BufferQueueProducer::getFreeBufferLocked() const {
  196. if (mCore->mFreeBuffers.empty()) {
  197. return BufferQueueCore::INVALID_BUFFER_SLOT;
  198. }
  199. int slot = mCore->mFreeBuffers.front();
  200. mCore->mFreeBuffers.pop_front();
  201. return slot;
  202. }
  203. int BufferQueueProducer::getFreeSlotLocked() const {
  204. if (mCore->mFreeSlots.empty()) {
  205. return BufferQueueCore::INVALID_BUFFER_SLOT;
  206. }
  207. int slot = *(mCore->mFreeSlots.begin());
  208. mCore->mFreeSlots.erase(slot);
  209. return slot;
  210. }
  211. status_t BufferQueueProducer::waitForFreeSlotThenRelock(FreeSlotCaller caller,
  212. std::unique_lock<std::mutex>& lock, int* found) const {
  213. auto callerString = (caller == FreeSlotCaller::Dequeue) ?
  214. "dequeueBuffer" : "attachBuffer";
  215. bool tryAgain = true;
  216. while (tryAgain) {
  217. if (mCore->mIsAbandoned) {
  218. BQ_LOGE("%s: BufferQueue has been abandoned", callerString);
  219. return NO_INIT;
  220. }
  221. int dequeuedCount = 0;
  222. int acquiredCount = 0;
  223. for (int s : mCore->mActiveBuffers) {
  224. if (mSlots[s].mBufferState.isDequeued()) {
  225. ++dequeuedCount;
  226. }
  227. if (mSlots[s].mBufferState.isAcquired()) {
  228. ++acquiredCount;
  229. }
  230. }
  231. // Producers are not allowed to dequeue more than
  232. // mMaxDequeuedBufferCount buffers.
  233. // This check is only done if a buffer has already been queued
  234. if (mCore->mBufferHasBeenQueued &&
  235. dequeuedCount >= mCore->mMaxDequeuedBufferCount) {
236. // Suppress error logs when the dequeue timeout is non-negative.
  237. if (mDequeueTimeout < 0) {
  238. BQ_LOGE("%s: attempting to exceed the max dequeued buffer "
  239. "count (%d)", callerString,
  240. mCore->mMaxDequeuedBufferCount);
  241. }
  242. return INVALID_OPERATION;
  243. }
  244. *found = BufferQueueCore::INVALID_BUFFER_SLOT;
  245. // If we disconnect and reconnect quickly, we can be in a state where
  246. // our slots are empty but we have many buffers in the queue. This can
  247. // cause us to run out of memory if we outrun the consumer. Wait here if
  248. // it looks like we have too many buffers queued up.
  249. const int maxBufferCount = mCore->getMaxBufferCountLocked();
  250. bool tooManyBuffers = mCore->mQueue.size()
  251. > static_cast<size_t>(maxBufferCount);
  252. if (tooManyBuffers) {
  253. BQ_LOGV("%s: queue size is %zu, waiting", callerString,
  254. mCore->mQueue.size());
  255. } else {
  256. // If in shared buffer mode and a shared buffer exists, always
  257. // return it.
  258. if (mCore->mSharedBufferMode && mCore->mSharedBufferSlot !=
  259. BufferQueueCore::INVALID_BUFFER_SLOT) {
  260. *found = mCore->mSharedBufferSlot;
  261. } else {
  262. if (caller == FreeSlotCaller::Dequeue) {
  263. // If we're calling this from dequeue, prefer free buffers
  264. int slot = getFreeBufferLocked();
  265. if (slot != BufferQueueCore::INVALID_BUFFER_SLOT) {
  266. *found = slot;
  267. } else if (mCore->mAllowAllocation) {
  268. *found = getFreeSlotLocked();
  269. }
  270. } else {
  271. // If we're calling this from attach, prefer free slots
  272. int slot = getFreeSlotLocked();
  273. if (slot != BufferQueueCore::INVALID_BUFFER_SLOT) {
  274. *found = slot;
  275. } else {
  276. *found = getFreeBufferLocked();
  277. }
  278. }
  279. }
  280. }
  281. // If no buffer is found, or if the queue has too many buffers
  282. // outstanding, wait for a buffer to be acquired or released, or for the
  283. // max buffer count to change.
  284. tryAgain = (*found == BufferQueueCore::INVALID_BUFFER_SLOT) ||
  285. tooManyBuffers;
  286. if (tryAgain) {
  287. // Return an error if we're in non-blocking mode (producer and
  288. // consumer are controlled by the application).
  289. // However, the consumer is allowed to briefly acquire an extra
  290. // buffer (which could cause us to have to wait here), which is
  291. // okay, since it is only used to implement an atomic acquire +
  292. // release (e.g., in GLConsumer::updateTexImage())
  293. if ((mCore->mDequeueBufferCannotBlock || mCore->mAsyncMode) &&
  294. (acquiredCount <= mCore->mMaxAcquiredBufferCount)) {
  295. return WOULD_BLOCK;
  296. }
  297. if (mDequeueTimeout >= 0) {
  298. std::cv_status result = mCore->mDequeueCondition.wait_for(lock,
  299. std::chrono::nanoseconds(mDequeueTimeout));
  300. if (result == std::cv_status::timeout) {
  301. return TIMED_OUT;
  302. }
  303. } else {
  304. mCore->mDequeueCondition.wait(lock);
  305. }
  306. }
  307. } // while (tryAgain)
  308. return NO_ERROR;
  309. }
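/*
 * Selection policy recap for the loop above (no new behaviour): dequeue
 * callers prefer a slot that already holds a buffer (mFreeBuffers) and only
 * fall back to an empty slot (mFreeSlots) when allocation is allowed, while
 * attach callers prefer an empty slot, since the incoming buffer replaces
 * whatever the slot holds. When nothing is usable the call blocks on
 * mDequeueCondition, returns WOULD_BLOCK in non-blocking/async mode, or
 * returns TIMED_OUT once a non-negative dequeue timeout expires.
 */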
  310. status_t BufferQueueProducer::dequeueBuffer(int* outSlot, sp<android::Fence>* outFence,
  311. uint32_t width, uint32_t height, PixelFormat format,
  312. uint64_t usage, uint64_t* outBufferAge,
  313. FrameEventHistoryDelta* outTimestamps) {
  314. ATRACE_CALL();
  315. { // Autolock scope
  316. std::lock_guard<std::mutex> lock(mCore->mMutex);
  317. mConsumerName = mCore->mConsumerName;
  318. if (mCore->mIsAbandoned) {
  319. BQ_LOGE("dequeueBuffer: BufferQueue has been abandoned");
  320. return NO_INIT;
  321. }
  322. if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
  323. BQ_LOGE("dequeueBuffer: BufferQueue has no connected producer");
  324. return NO_INIT;
  325. }
  326. } // Autolock scope
  327. BQ_LOGV("dequeueBuffer: w=%u h=%u format=%#x, usage=%#" PRIx64, width, height, format, usage);
  328. if ((width && !height) || (!width && height)) {
  329. BQ_LOGE("dequeueBuffer: invalid size: w=%u h=%u", width, height);
  330. return BAD_VALUE;
  331. }
  332. status_t returnFlags = NO_ERROR;
  333. EGLDisplay eglDisplay = EGL_NO_DISPLAY;
  334. EGLSyncKHR eglFence = EGL_NO_SYNC_KHR;
  335. bool attachedByConsumer = false;
  336. { // Autolock scope
  337. std::unique_lock<std::mutex> lock(mCore->mMutex);
  338. // If we don't have a free buffer, but we are currently allocating, we wait until allocation
  339. // is finished such that we don't allocate in parallel.
  340. if (mCore->mFreeBuffers.empty() && mCore->mIsAllocating) {
  341. mDequeueWaitingForAllocation = true;
  342. mCore->waitWhileAllocatingLocked(lock);
  343. mDequeueWaitingForAllocation = false;
  344. mDequeueWaitingForAllocationCondition.notify_all();
  345. }
  346. if (format == 0) {
  347. format = mCore->mDefaultBufferFormat;
  348. }
  349. // Enable the usage bits the consumer requested
  350. usage |= mCore->mConsumerUsageBits;
  351. const bool useDefaultSize = !width && !height;
  352. if (useDefaultSize) {
  353. width = mCore->mDefaultWidth;
  354. height = mCore->mDefaultHeight;
  355. }
  356. int found = BufferItem::INVALID_BUFFER_SLOT;
  357. while (found == BufferItem::INVALID_BUFFER_SLOT) {
  358. status_t status = waitForFreeSlotThenRelock(FreeSlotCaller::Dequeue, lock, &found);
  359. if (status != NO_ERROR) {
  360. return status;
  361. }
  362. // This should not happen
  363. if (found == BufferQueueCore::INVALID_BUFFER_SLOT) {
  364. BQ_LOGE("dequeueBuffer: no available buffer slots");
  365. return -EBUSY;
  366. }
  367. const sp<GraphicBuffer>& buffer(mSlots[found].mGraphicBuffer);
  368. // If we are not allowed to allocate new buffers,
  369. // waitForFreeSlotThenRelock must have returned a slot containing a
  370. // buffer. If this buffer would require reallocation to meet the
  371. // requested attributes, we free it and attempt to get another one.
  372. if (!mCore->mAllowAllocation) {
  373. if (buffer->needsReallocation(width, height, format, BQ_LAYER_COUNT, usage)) {
  374. if (mCore->mSharedBufferSlot == found) {
375. BQ_LOGE("dequeueBuffer: cannot re-allocate a shared buffer");
  376. return BAD_VALUE;
  377. }
  378. mCore->mFreeSlots.insert(found);
  379. mCore->clearBufferSlotLocked(found);
  380. found = BufferItem::INVALID_BUFFER_SLOT;
  381. continue;
  382. }
  383. }
  384. }
  385. const sp<GraphicBuffer>& buffer(mSlots[found].mGraphicBuffer);
  386. if (mCore->mSharedBufferSlot == found &&
  387. buffer->needsReallocation(width, height, format, BQ_LAYER_COUNT, usage)) {
388. BQ_LOGE("dequeueBuffer: cannot re-allocate a shared "
389. "buffer");
  390. return BAD_VALUE;
  391. }
  392. if (mCore->mSharedBufferSlot != found) {
  393. mCore->mActiveBuffers.insert(found);
  394. }
  395. *outSlot = found;
  396. ATRACE_BUFFER_INDEX(found);
  397. attachedByConsumer = mSlots[found].mNeedsReallocation;
  398. mSlots[found].mNeedsReallocation = false;
  399. mSlots[found].mBufferState.dequeue();
  400. if ((buffer == nullptr) ||
  401. buffer->needsReallocation(width, height, format, BQ_LAYER_COUNT, usage))
  402. {
  403. mSlots[found].mAcquireCalled = false;
  404. mSlots[found].mGraphicBuffer = nullptr;
  405. mSlots[found].mRequestBufferCalled = false;
  406. mSlots[found].mEglDisplay = EGL_NO_DISPLAY;
  407. mSlots[found].mEglFence = EGL_NO_SYNC_KHR;
  408. mSlots[found].mFence = Fence::NO_FENCE;
  409. mCore->mBufferAge = 0;
  410. mCore->mIsAllocating = true;
  411. returnFlags |= BUFFER_NEEDS_REALLOCATION;
  412. } else {
  413. // We add 1 because that will be the frame number when this buffer
  414. // is queued
  415. mCore->mBufferAge = mCore->mFrameCounter + 1 - mSlots[found].mFrameNumber;
  416. }
  417. BQ_LOGV("dequeueBuffer: setting buffer age to %" PRIu64,
  418. mCore->mBufferAge);
  419. if (CC_UNLIKELY(mSlots[found].mFence == nullptr)) {
  420. BQ_LOGE("dequeueBuffer: about to return a NULL fence - "
  421. "slot=%d w=%d h=%d format=%u",
  422. found, buffer->width, buffer->height, buffer->format);
  423. }
  424. eglDisplay = mSlots[found].mEglDisplay;
  425. eglFence = mSlots[found].mEglFence;
  426. // Don't return a fence in shared buffer mode, except for the first
  427. // frame.
  428. *outFence = (mCore->mSharedBufferMode &&
  429. mCore->mSharedBufferSlot == found) ?
  430. Fence::NO_FENCE : mSlots[found].mFence;
  431. mSlots[found].mEglFence = EGL_NO_SYNC_KHR;
  432. mSlots[found].mFence = Fence::NO_FENCE;
  433. // If shared buffer mode has just been enabled, cache the slot of the
  434. // first buffer that is dequeued and mark it as the shared buffer.
  435. if (mCore->mSharedBufferMode && mCore->mSharedBufferSlot ==
  436. BufferQueueCore::INVALID_BUFFER_SLOT) {
  437. mCore->mSharedBufferSlot = found;
  438. mSlots[found].mBufferState.mShared = true;
  439. }
  440. } // Autolock scope
  441. if (returnFlags & BUFFER_NEEDS_REALLOCATION) {
  442. BQ_LOGV("dequeueBuffer: allocating a new buffer for slot %d", *outSlot);
  443. sp<GraphicBuffer> graphicBuffer = new GraphicBuffer(
  444. width, height, format, BQ_LAYER_COUNT, usage,
  445. {mConsumerName.string(), mConsumerName.size()});
  446. status_t error = graphicBuffer->initCheck();
  447. { // Autolock scope
  448. std::lock_guard<std::mutex> lock(mCore->mMutex);
  449. if (error == NO_ERROR && !mCore->mIsAbandoned) {
  450. graphicBuffer->setGenerationNumber(mCore->mGenerationNumber);
  451. mSlots[*outSlot].mGraphicBuffer = graphicBuffer;
  452. }
  453. mCore->mIsAllocating = false;
  454. mCore->mIsAllocatingCondition.notify_all();
  455. if (error != NO_ERROR) {
  456. mCore->mFreeSlots.insert(*outSlot);
  457. mCore->clearBufferSlotLocked(*outSlot);
  458. BQ_LOGE("dequeueBuffer: createGraphicBuffer failed");
  459. return error;
  460. }
  461. if (mCore->mIsAbandoned) {
  462. mCore->mFreeSlots.insert(*outSlot);
  463. mCore->clearBufferSlotLocked(*outSlot);
  464. BQ_LOGE("dequeueBuffer: BufferQueue has been abandoned");
  465. return NO_INIT;
  466. }
  467. VALIDATE_CONSISTENCY();
  468. } // Autolock scope
  469. }
  470. if (attachedByConsumer) {
  471. returnFlags |= BUFFER_NEEDS_REALLOCATION;
  472. }
  473. if (eglFence != EGL_NO_SYNC_KHR) {
  474. EGLint result = eglClientWaitSyncKHR(eglDisplay, eglFence, 0,
  475. 1000000000);
  476. // If something goes wrong, log the error, but return the buffer without
  477. // synchronizing access to it. It's too late at this point to abort the
  478. // dequeue operation.
  479. if (result == EGL_FALSE) {
  480. BQ_LOGE("dequeueBuffer: error %#x waiting for fence",
  481. eglGetError());
  482. } else if (result == EGL_TIMEOUT_EXPIRED_KHR) {
  483. BQ_LOGE("dequeueBuffer: timeout waiting for fence");
  484. }
  485. eglDestroySyncKHR(eglDisplay, eglFence);
  486. }
  487. BQ_LOGV("dequeueBuffer: returning slot=%d/%" PRIu64 " buf=%p flags=%#x",
  488. *outSlot,
  489. mSlots[*outSlot].mFrameNumber,
  490. mSlots[*outSlot].mGraphicBuffer->handle, returnFlags);
  491. if (outBufferAge) {
  492. *outBufferAge = mCore->mBufferAge;
  493. }
  494. addAndGetFrameTimestamps(nullptr, outTimestamps);
  495. return returnFlags;
  496. }
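/*
 * End-to-end producer loop built on the calls above (a sketch under
 * assumptions: "producer" is a connected IGraphicBufferProducer, fillFrame()
 * is a hypothetical CPU-rendering helper, and QueueBufferInput's constructor
 * is taken to mirror the field order deflate() unpacks in queueBuffer()):
 *
 *   int slot = -1;
 *   sp<Fence> fence;
 *   status_t result = producer->dequeueBuffer(&slot, &fence, 0, 0,
 *           PIXEL_FORMAT_RGBA_8888, GRALLOC_USAGE_SW_WRITE_OFTEN,
 *           nullptr, nullptr);               // 0x0 requests the default size
 *   if (result < 0) return;
 *   sp<GraphicBuffer> buffer;
 *   producer->requestBuffer(slot, &buffer);
 *   fence->waitForever("example-dequeue");   // previous reader may still hold it
 *   fillFrame(buffer);                       // hypothetical rendering step
 *   IGraphicBufferProducer::QueueBufferInput input(systemTime(), true,
 *           HAL_DATASPACE_UNKNOWN,
 *           Rect(buffer->getWidth(), buffer->getHeight()),
 *           NATIVE_WINDOW_SCALING_MODE_FREEZE, 0, Fence::NO_FENCE);
 *   IGraphicBufferProducer::QueueBufferOutput output;
 *   producer->queueBuffer(slot, input, &output);
 */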
  497. status_t BufferQueueProducer::detachBuffer(int slot) {
  498. ATRACE_CALL();
  499. ATRACE_BUFFER_INDEX(slot);
  500. BQ_LOGV("detachBuffer: slot %d", slot);
  501. sp<IConsumerListener> listener;
  502. {
  503. std::lock_guard<std::mutex> lock(mCore->mMutex);
  504. if (mCore->mIsAbandoned) {
  505. BQ_LOGE("detachBuffer: BufferQueue has been abandoned");
  506. return NO_INIT;
  507. }
  508. if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
  509. BQ_LOGE("detachBuffer: BufferQueue has no connected producer");
  510. return NO_INIT;
  511. }
  512. if (mCore->mSharedBufferMode || mCore->mSharedBufferSlot == slot) {
  513. BQ_LOGE("detachBuffer: cannot detach a buffer in shared buffer mode");
  514. return BAD_VALUE;
  515. }
  516. if (slot < 0 || slot >= BufferQueueDefs::NUM_BUFFER_SLOTS) {
  517. BQ_LOGE("detachBuffer: slot index %d out of range [0, %d)",
  518. slot, BufferQueueDefs::NUM_BUFFER_SLOTS);
  519. return BAD_VALUE;
  520. } else if (!mSlots[slot].mBufferState.isDequeued()) {
  521. BQ_LOGE("detachBuffer: slot %d is not owned by the producer "
  522. "(state = %s)", slot, mSlots[slot].mBufferState.string());
  523. return BAD_VALUE;
  524. } else if (!mSlots[slot].mRequestBufferCalled) {
  525. BQ_LOGE("detachBuffer: buffer in slot %d has not been requested",
  526. slot);
  527. return BAD_VALUE;
  528. }
  529. mSlots[slot].mBufferState.detachProducer();
  530. mCore->mActiveBuffers.erase(slot);
  531. mCore->mFreeSlots.insert(slot);
  532. mCore->clearBufferSlotLocked(slot);
  533. mCore->mDequeueCondition.notify_all();
  534. VALIDATE_CONSISTENCY();
  535. listener = mCore->mConsumerListener;
  536. }
  537. if (listener != nullptr) {
  538. listener->onBuffersReleased();
  539. }
  540. return NO_ERROR;
  541. }
  542. status_t BufferQueueProducer::detachNextBuffer(sp<GraphicBuffer>* outBuffer,
  543. sp<Fence>* outFence) {
  544. ATRACE_CALL();
  545. if (outBuffer == nullptr) {
  546. BQ_LOGE("detachNextBuffer: outBuffer must not be NULL");
  547. return BAD_VALUE;
  548. } else if (outFence == nullptr) {
  549. BQ_LOGE("detachNextBuffer: outFence must not be NULL");
  550. return BAD_VALUE;
  551. }
  552. sp<IConsumerListener> listener;
  553. {
  554. std::unique_lock<std::mutex> lock(mCore->mMutex);
  555. if (mCore->mIsAbandoned) {
  556. BQ_LOGE("detachNextBuffer: BufferQueue has been abandoned");
  557. return NO_INIT;
  558. }
  559. if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
  560. BQ_LOGE("detachNextBuffer: BufferQueue has no connected producer");
  561. return NO_INIT;
  562. }
  563. if (mCore->mSharedBufferMode) {
  564. BQ_LOGE("detachNextBuffer: cannot detach a buffer in shared buffer "
  565. "mode");
  566. return BAD_VALUE;
  567. }
  568. mCore->waitWhileAllocatingLocked(lock);
  569. if (mCore->mFreeBuffers.empty()) {
  570. return NO_MEMORY;
  571. }
  572. int found = mCore->mFreeBuffers.front();
  573. mCore->mFreeBuffers.remove(found);
  574. mCore->mFreeSlots.insert(found);
  575. BQ_LOGV("detachNextBuffer detached slot %d", found);
  576. *outBuffer = mSlots[found].mGraphicBuffer;
  577. *outFence = mSlots[found].mFence;
  578. mCore->clearBufferSlotLocked(found);
  579. VALIDATE_CONSISTENCY();
  580. listener = mCore->mConsumerListener;
  581. }
  582. if (listener != nullptr) {
  583. listener->onBuffersReleased();
  584. }
  585. return NO_ERROR;
  586. }
  587. status_t BufferQueueProducer::attachBuffer(int* outSlot,
  588. const sp<android::GraphicBuffer>& buffer) {
  589. ATRACE_CALL();
  590. if (outSlot == nullptr) {
  591. BQ_LOGE("attachBuffer: outSlot must not be NULL");
  592. return BAD_VALUE;
  593. } else if (buffer == nullptr) {
  594. BQ_LOGE("attachBuffer: cannot attach NULL buffer");
  595. return BAD_VALUE;
  596. }
  597. std::unique_lock<std::mutex> lock(mCore->mMutex);
  598. if (mCore->mIsAbandoned) {
  599. BQ_LOGE("attachBuffer: BufferQueue has been abandoned");
  600. return NO_INIT;
  601. }
  602. if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
  603. BQ_LOGE("attachBuffer: BufferQueue has no connected producer");
  604. return NO_INIT;
  605. }
  606. if (mCore->mSharedBufferMode) {
  607. BQ_LOGE("attachBuffer: cannot attach a buffer in shared buffer mode");
  608. return BAD_VALUE;
  609. }
  610. if (buffer->getGenerationNumber() != mCore->mGenerationNumber) {
  611. BQ_LOGE("attachBuffer: generation number mismatch [buffer %u] "
  612. "[queue %u]", buffer->getGenerationNumber(),
  613. mCore->mGenerationNumber);
  614. return BAD_VALUE;
  615. }
  616. mCore->waitWhileAllocatingLocked(lock);
  617. status_t returnFlags = NO_ERROR;
  618. int found;
  619. status_t status = waitForFreeSlotThenRelock(FreeSlotCaller::Attach, lock, &found);
  620. if (status != NO_ERROR) {
  621. return status;
  622. }
  623. // This should not happen
  624. if (found == BufferQueueCore::INVALID_BUFFER_SLOT) {
  625. BQ_LOGE("attachBuffer: no available buffer slots");
  626. return -EBUSY;
  627. }
  628. *outSlot = found;
  629. ATRACE_BUFFER_INDEX(*outSlot);
  630. BQ_LOGV("attachBuffer: returning slot %d flags=%#x",
  631. *outSlot, returnFlags);
  632. mSlots[*outSlot].mGraphicBuffer = buffer;
  633. mSlots[*outSlot].mBufferState.attachProducer();
  634. mSlots[*outSlot].mEglFence = EGL_NO_SYNC_KHR;
  635. mSlots[*outSlot].mFence = Fence::NO_FENCE;
  636. mSlots[*outSlot].mRequestBufferCalled = true;
  637. mSlots[*outSlot].mAcquireCalled = false;
  638. mSlots[*outSlot].mNeedsReallocation = false;
  639. mCore->mActiveBuffers.insert(found);
  640. VALIDATE_CONSISTENCY();
  641. return returnFlags;
  642. }
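/*
 * detachBuffer()/attachBuffer() let a buffer migrate between queues without a
 * fresh allocation. A minimal sketch (assumes both producers are connected and
 * the generation numbers are made to match; names are illustrative):
 *
 *   sp<GraphicBuffer> buffer;
 *   sp<Fence> fence;
 *   if (sourceProducer->detachNextBuffer(&buffer, &fence) == NO_ERROR) {
 *       buffer->setGenerationNumber(targetGeneration);
 *       int slot = -1;
 *       targetProducer->attachBuffer(&slot, buffer);
 *   }
 */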
  643. status_t BufferQueueProducer::queueBuffer(int slot,
  644. const QueueBufferInput &input, QueueBufferOutput *output) {
  645. ATRACE_CALL();
  646. ATRACE_BUFFER_INDEX(slot);
  647. int64_t requestedPresentTimestamp;
  648. bool isAutoTimestamp;
  649. android_dataspace dataSpace;
  650. Rect crop(Rect::EMPTY_RECT);
  651. int scalingMode;
  652. uint32_t transform;
  653. uint32_t stickyTransform;
  654. sp<Fence> acquireFence;
  655. bool getFrameTimestamps = false;
  656. input.deflate(&requestedPresentTimestamp, &isAutoTimestamp, &dataSpace,
  657. &crop, &scalingMode, &transform, &acquireFence, &stickyTransform,
  658. &getFrameTimestamps);
  659. const Region& surfaceDamage = input.getSurfaceDamage();
  660. const HdrMetadata& hdrMetadata = input.getHdrMetadata();
  661. if (acquireFence == nullptr) {
  662. BQ_LOGE("queueBuffer: fence is NULL");
  663. return BAD_VALUE;
  664. }
  665. auto acquireFenceTime = std::make_shared<FenceTime>(acquireFence);
  666. switch (scalingMode) {
  667. case NATIVE_WINDOW_SCALING_MODE_FREEZE:
  668. case NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW:
  669. case NATIVE_WINDOW_SCALING_MODE_SCALE_CROP:
  670. case NATIVE_WINDOW_SCALING_MODE_NO_SCALE_CROP:
  671. break;
  672. default:
  673. BQ_LOGE("queueBuffer: unknown scaling mode %d", scalingMode);
  674. return BAD_VALUE;
  675. }
  676. sp<IConsumerListener> frameAvailableListener;
  677. sp<IConsumerListener> frameReplacedListener;
  678. int callbackTicket = 0;
  679. uint64_t currentFrameNumber = 0;
  680. BufferItem item;
  681. { // Autolock scope
  682. std::lock_guard<std::mutex> lock(mCore->mMutex);
  683. if (mCore->mIsAbandoned) {
  684. BQ_LOGE("queueBuffer: BufferQueue has been abandoned");
  685. return NO_INIT;
  686. }
  687. if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
  688. BQ_LOGE("queueBuffer: BufferQueue has no connected producer");
  689. return NO_INIT;
  690. }
  691. if (slot < 0 || slot >= BufferQueueDefs::NUM_BUFFER_SLOTS) {
  692. BQ_LOGE("queueBuffer: slot index %d out of range [0, %d)",
  693. slot, BufferQueueDefs::NUM_BUFFER_SLOTS);
  694. return BAD_VALUE;
  695. } else if (!mSlots[slot].mBufferState.isDequeued()) {
  696. BQ_LOGE("queueBuffer: slot %d is not owned by the producer "
  697. "(state = %s)", slot, mSlots[slot].mBufferState.string());
  698. return BAD_VALUE;
  699. } else if (!mSlots[slot].mRequestBufferCalled) {
  700. BQ_LOGE("queueBuffer: slot %d was queued without requesting "
  701. "a buffer", slot);
  702. return BAD_VALUE;
  703. }
  704. // If shared buffer mode has just been enabled, cache the slot of the
  705. // first buffer that is queued and mark it as the shared buffer.
  706. if (mCore->mSharedBufferMode && mCore->mSharedBufferSlot ==
  707. BufferQueueCore::INVALID_BUFFER_SLOT) {
  708. mCore->mSharedBufferSlot = slot;
  709. mSlots[slot].mBufferState.mShared = true;
  710. }
  711. BQ_LOGV("queueBuffer: slot=%d/%" PRIu64 " time=%" PRIu64 " dataSpace=%d"
  712. " validHdrMetadataTypes=0x%x crop=[%d,%d,%d,%d] transform=%#x scale=%s",
  713. slot, mCore->mFrameCounter + 1, requestedPresentTimestamp, dataSpace,
  714. hdrMetadata.validTypes, crop.left, crop.top, crop.right, crop.bottom,
  715. transform,
  716. BufferItem::scalingModeName(static_cast<uint32_t>(scalingMode)));
  717. const sp<GraphicBuffer>& graphicBuffer(mSlots[slot].mGraphicBuffer);
  718. Rect bufferRect(graphicBuffer->getWidth(), graphicBuffer->getHeight());
  719. Rect croppedRect(Rect::EMPTY_RECT);
  720. crop.intersect(bufferRect, &croppedRect);
  721. if (croppedRect != crop) {
  722. BQ_LOGE("queueBuffer: crop rect is not contained within the "
  723. "buffer in slot %d", slot);
  724. return BAD_VALUE;
  725. }
  726. // Override UNKNOWN dataspace with consumer default
  727. if (dataSpace == HAL_DATASPACE_UNKNOWN) {
  728. dataSpace = mCore->mDefaultBufferDataSpace;
  729. }
  730. mSlots[slot].mFence = acquireFence;
  731. mSlots[slot].mBufferState.queue();
  732. // Increment the frame counter and store a local version of it
  733. // for use outside the lock on mCore->mMutex.
  734. ++mCore->mFrameCounter;
  735. currentFrameNumber = mCore->mFrameCounter;
  736. mSlots[slot].mFrameNumber = currentFrameNumber;
  737. item.mAcquireCalled = mSlots[slot].mAcquireCalled;
  738. item.mGraphicBuffer = mSlots[slot].mGraphicBuffer;
  739. item.mCrop = crop;
  740. item.mTransform = transform &
  741. ~static_cast<uint32_t>(NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY);
  742. item.mTransformToDisplayInverse =
  743. (transform & NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY) != 0;
  744. item.mScalingMode = static_cast<uint32_t>(scalingMode);
  745. item.mTimestamp = requestedPresentTimestamp;
  746. item.mIsAutoTimestamp = isAutoTimestamp;
  747. item.mDataSpace = dataSpace;
  748. item.mHdrMetadata = hdrMetadata;
  749. item.mFrameNumber = currentFrameNumber;
  750. item.mSlot = slot;
  751. item.mFence = acquireFence;
  752. item.mFenceTime = acquireFenceTime;
  753. item.mIsDroppable = mCore->mAsyncMode ||
  754. (mConsumerIsSurfaceFlinger && mCore->mQueueBufferCanDrop) ||
  755. (mCore->mLegacyBufferDrop && mCore->mQueueBufferCanDrop) ||
  756. (mCore->mSharedBufferMode && mCore->mSharedBufferSlot == slot);
  757. item.mSurfaceDamage = surfaceDamage;
  758. item.mQueuedBuffer = true;
  759. item.mAutoRefresh = mCore->mSharedBufferMode && mCore->mAutoRefresh;
  760. item.mApi = mCore->mConnectedApi;
  761. mStickyTransform = stickyTransform;
  762. // Cache the shared buffer data so that the BufferItem can be recreated.
  763. if (mCore->mSharedBufferMode) {
  764. mCore->mSharedBufferCache.crop = crop;
  765. mCore->mSharedBufferCache.transform = transform;
  766. mCore->mSharedBufferCache.scalingMode = static_cast<uint32_t>(
  767. scalingMode);
  768. mCore->mSharedBufferCache.dataspace = dataSpace;
  769. }
  770. output->bufferReplaced = false;
  771. if (mCore->mQueue.empty()) {
  772. // When the queue is empty, we can ignore mDequeueBufferCannotBlock
  773. // and simply queue this buffer
  774. mCore->mQueue.push_back(item);
  775. frameAvailableListener = mCore->mConsumerListener;
  776. } else {
  777. // When the queue is not empty, we need to look at the last buffer
  778. // in the queue to see if we need to replace it
  779. const BufferItem& last = mCore->mQueue.itemAt(
  780. mCore->mQueue.size() - 1);
  781. if (last.mIsDroppable) {
  782. if (!last.mIsStale) {
  783. mSlots[last.mSlot].mBufferState.freeQueued();
  784. // After leaving shared buffer mode, the shared buffer will
  785. // still be around. Mark it as no longer shared if this
  786. // operation causes it to be free.
  787. if (!mCore->mSharedBufferMode &&
  788. mSlots[last.mSlot].mBufferState.isFree()) {
  789. mSlots[last.mSlot].mBufferState.mShared = false;
  790. }
  791. // Don't put the shared buffer on the free list.
  792. if (!mSlots[last.mSlot].mBufferState.isShared()) {
  793. mCore->mActiveBuffers.erase(last.mSlot);
  794. mCore->mFreeBuffers.push_back(last.mSlot);
  795. output->bufferReplaced = true;
  796. }
  797. }
  798. // Make sure to merge the damage rect from the frame we're about
  799. // to drop into the new frame's damage rect.
  800. if (last.mSurfaceDamage.bounds() == Rect::INVALID_RECT ||
  801. item.mSurfaceDamage.bounds() == Rect::INVALID_RECT) {
  802. item.mSurfaceDamage = Region::INVALID_REGION;
  803. } else {
  804. item.mSurfaceDamage |= last.mSurfaceDamage;
  805. }
  806. // Overwrite the droppable buffer with the incoming one
  807. mCore->mQueue.editItemAt(mCore->mQueue.size() - 1) = item;
  808. frameReplacedListener = mCore->mConsumerListener;
  809. } else {
  810. mCore->mQueue.push_back(item);
  811. frameAvailableListener = mCore->mConsumerListener;
  812. }
  813. }
  814. mCore->mBufferHasBeenQueued = true;
  815. mCore->mDequeueCondition.notify_all();
  816. mCore->mLastQueuedSlot = slot;
  817. output->width = mCore->mDefaultWidth;
  818. output->height = mCore->mDefaultHeight;
  819. output->transformHint = mCore->mTransformHint;
  820. output->numPendingBuffers = static_cast<uint32_t>(mCore->mQueue.size());
  821. output->nextFrameNumber = mCore->mFrameCounter + 1;
  822. ATRACE_INT(mCore->mConsumerName.string(),
  823. static_cast<int32_t>(mCore->mQueue.size()));
  824. mCore->mOccupancyTracker.registerOccupancyChange(mCore->mQueue.size());
  825. // Take a ticket for the callback functions
  826. callbackTicket = mNextCallbackTicket++;
  827. VALIDATE_CONSISTENCY();
  828. } // Autolock scope
  829. // It is okay not to clear the GraphicBuffer when the consumer is SurfaceFlinger because
  830. // it is guaranteed that the BufferQueue is inside SurfaceFlinger's process and
  831. // there will be no Binder call
  832. if (!mConsumerIsSurfaceFlinger) {
  833. item.mGraphicBuffer.clear();
  834. }
  835. // Call back without the main BufferQueue lock held, but with the callback
  836. // lock held so we can ensure that callbacks occur in order
  837. int connectedApi;
  838. sp<Fence> lastQueuedFence;
  839. { // scope for the lock
  840. std::unique_lock<std::mutex> lock(mCallbackMutex);
  841. while (callbackTicket != mCurrentCallbackTicket) {
  842. mCallbackCondition.wait(lock);
  843. }
  844. if (frameAvailableListener != nullptr) {
  845. frameAvailableListener->onFrameAvailable(item);
  846. } else if (frameReplacedListener != nullptr) {
  847. frameReplacedListener->onFrameReplaced(item);
  848. }
  849. connectedApi = mCore->mConnectedApi;
  850. lastQueuedFence = std::move(mLastQueueBufferFence);
  851. mLastQueueBufferFence = std::move(acquireFence);
  852. mLastQueuedCrop = item.mCrop;
  853. mLastQueuedTransform = item.mTransform;
  854. ++mCurrentCallbackTicket;
  855. mCallbackCondition.notify_all();
  856. }
  857. // Update and get FrameEventHistory.
  858. nsecs_t postedTime = systemTime(SYSTEM_TIME_MONOTONIC);
  859. NewFrameEventsEntry newFrameEventsEntry = {
  860. currentFrameNumber,
  861. postedTime,
  862. requestedPresentTimestamp,
  863. std::move(acquireFenceTime)
  864. };
  865. addAndGetFrameTimestamps(&newFrameEventsEntry,
  866. getFrameTimestamps ? &output->frameTimestamps : nullptr);
  867. // Wait without lock held
  868. if (connectedApi == NATIVE_WINDOW_API_EGL) {
  869. // Waiting here allows for two full buffers to be queued but not a
  870. // third. In the event that frames take varying time, this makes a
  871. // small trade-off in favor of latency rather than throughput.
  872. lastQueuedFence->waitForever("Throttling EGL Production");
  873. }
  874. return NO_ERROR;
  875. }
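/*
 * Replacement policy recap for queueBuffer() (restating the code above): the
 * newly queued item may overwrite the pending tail of mQueue only when that
 * tail is droppable, i.e. async mode is on, the consumer is SurfaceFlinger and
 * dropping is permitted, legacy buffer drop is enabled with dropping
 * permitted, or the queue is in shared-buffer mode. Otherwise items simply
 * accumulate until the consumer acquires them, and the surface damage of a
 * dropped frame is merged into its replacement.
 */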
  876. status_t BufferQueueProducer::cancelBuffer(int slot, const sp<Fence>& fence) {
  877. ATRACE_CALL();
  878. BQ_LOGV("cancelBuffer: slot %d", slot);
  879. std::lock_guard<std::mutex> lock(mCore->mMutex);
  880. if (mCore->mIsAbandoned) {
  881. BQ_LOGE("cancelBuffer: BufferQueue has been abandoned");
  882. return NO_INIT;
  883. }
  884. if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
  885. BQ_LOGE("cancelBuffer: BufferQueue has no connected producer");
  886. return NO_INIT;
  887. }
  888. if (mCore->mSharedBufferMode) {
  889. BQ_LOGE("cancelBuffer: cannot cancel a buffer in shared buffer mode");
  890. return BAD_VALUE;
  891. }
  892. if (slot < 0 || slot >= BufferQueueDefs::NUM_BUFFER_SLOTS) {
  893. BQ_LOGE("cancelBuffer: slot index %d out of range [0, %d)",
  894. slot, BufferQueueDefs::NUM_BUFFER_SLOTS);
  895. return BAD_VALUE;
  896. } else if (!mSlots[slot].mBufferState.isDequeued()) {
  897. BQ_LOGE("cancelBuffer: slot %d is not owned by the producer "
  898. "(state = %s)", slot, mSlots[slot].mBufferState.string());
  899. return BAD_VALUE;
  900. } else if (fence == nullptr) {
  901. BQ_LOGE("cancelBuffer: fence is NULL");
  902. return BAD_VALUE;
  903. }
  904. mSlots[slot].mBufferState.cancel();
  905. // After leaving shared buffer mode, the shared buffer will still be around.
  906. // Mark it as no longer shared if this operation causes it to be free.
  907. if (!mCore->mSharedBufferMode && mSlots[slot].mBufferState.isFree()) {
  908. mSlots[slot].mBufferState.mShared = false;
  909. }
  910. // Don't put the shared buffer on the free list.
  911. if (!mSlots[slot].mBufferState.isShared()) {
  912. mCore->mActiveBuffers.erase(slot);
  913. mCore->mFreeBuffers.push_back(slot);
  914. }
  915. mSlots[slot].mFence = fence;
  916. mCore->mDequeueCondition.notify_all();
  917. VALIDATE_CONSISTENCY();
  918. return NO_ERROR;
  919. }
  920. int BufferQueueProducer::query(int what, int *outValue) {
  921. ATRACE_CALL();
  922. std::lock_guard<std::mutex> lock(mCore->mMutex);
  923. if (outValue == nullptr) {
  924. BQ_LOGE("query: outValue was NULL");
  925. return BAD_VALUE;
  926. }
  927. if (mCore->mIsAbandoned) {
  928. BQ_LOGE("query: BufferQueue has been abandoned");
  929. return NO_INIT;
  930. }
  931. int value;
  932. switch (what) {
  933. case NATIVE_WINDOW_WIDTH:
  934. value = static_cast<int32_t>(mCore->mDefaultWidth);
  935. break;
  936. case NATIVE_WINDOW_HEIGHT:
  937. value = static_cast<int32_t>(mCore->mDefaultHeight);
  938. break;
  939. case NATIVE_WINDOW_FORMAT:
  940. value = static_cast<int32_t>(mCore->mDefaultBufferFormat);
  941. break;
  942. case NATIVE_WINDOW_LAYER_COUNT:
  943. // All BufferQueue buffers have a single layer.
  944. value = BQ_LAYER_COUNT;
  945. break;
  946. case NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS:
  947. value = mCore->getMinUndequeuedBufferCountLocked();
  948. break;
  949. case NATIVE_WINDOW_STICKY_TRANSFORM:
  950. value = static_cast<int32_t>(mStickyTransform);
  951. break;
  952. case NATIVE_WINDOW_CONSUMER_RUNNING_BEHIND:
  953. value = (mCore->mQueue.size() > 1);
  954. break;
  955. case NATIVE_WINDOW_CONSUMER_USAGE_BITS:
  956. // deprecated; higher 32 bits are truncated
  957. value = static_cast<int32_t>(mCore->mConsumerUsageBits);
  958. break;
  959. case NATIVE_WINDOW_DEFAULT_DATASPACE:
  960. value = static_cast<int32_t>(mCore->mDefaultBufferDataSpace);
  961. break;
  962. case NATIVE_WINDOW_BUFFER_AGE:
  963. if (mCore->mBufferAge > INT32_MAX) {
  964. value = 0;
  965. } else {
  966. value = static_cast<int32_t>(mCore->mBufferAge);
  967. }
  968. break;
  969. case NATIVE_WINDOW_CONSUMER_IS_PROTECTED:
  970. value = static_cast<int32_t>(mCore->mConsumerIsProtected);
  971. break;
  972. case NATIVE_WINDOW_MAX_BUFFER_COUNT:
  973. value = static_cast<int32_t>(mCore->mMaxBufferCount);
  974. break;
  975. default:
  976. return BAD_VALUE;
  977. }
  978. BQ_LOGV("query: %d? %d", what, value);
  979. *outValue = value;
  980. return NO_ERROR;
  981. }
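/*
 * Typical query() usage from a producer client (sketch; "producer" is any
 * connected IGraphicBufferProducer handle):
 *
 *   int minUndequeued = 0;
 *   producer->query(NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeued);
 *   // A client that wants to keep N buffers in flight would then request
 *   // setMaxDequeuedBufferCount(N) on top of this minimum.
 */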
  982. status_t BufferQueueProducer::connect(const sp<IProducerListener>& listener,
  983. int api, bool producerControlledByApp, QueueBufferOutput *output) {
  984. ATRACE_CALL();
  985. std::lock_guard<std::mutex> lock(mCore->mMutex);
  986. mConsumerName = mCore->mConsumerName;
  987. BQ_LOGV("connect: api=%d producerControlledByApp=%s", api,
  988. producerControlledByApp ? "true" : "false");
  989. if (mCore->mIsAbandoned) {
  990. BQ_LOGE("connect: BufferQueue has been abandoned");
  991. return NO_INIT;
  992. }
  993. if (mCore->mConsumerListener == nullptr) {
  994. BQ_LOGE("connect: BufferQueue has no consumer");
  995. return NO_INIT;
  996. }
  997. if (output == nullptr) {
  998. BQ_LOGE("connect: output was NULL");
  999. return BAD_VALUE;
  1000. }
  1001. if (mCore->mConnectedApi != BufferQueueCore::NO_CONNECTED_API) {
  1002. BQ_LOGE("connect: already connected (cur=%d req=%d)",
  1003. mCore->mConnectedApi, api);
  1004. return BAD_VALUE;
  1005. }
  1006. int delta = mCore->getMaxBufferCountLocked(mCore->mAsyncMode,
  1007. mDequeueTimeout < 0 ?
  1008. mCore->mConsumerControlledByApp && producerControlledByApp : false,
  1009. mCore->mMaxBufferCount) -
  1010. mCore->getMaxBufferCountLocked();
  1011. if (!mCore->adjustAvailableSlotsLocked(delta)) {
  1012. BQ_LOGE("connect: BufferQueue failed to adjust the number of available "
  1013. "slots. Delta = %d", delta);
  1014. return BAD_VALUE;
  1015. }
  1016. int status = NO_ERROR;
  1017. switch (api) {
  1018. case NATIVE_WINDOW_API_EGL:
  1019. case NATIVE_WINDOW_API_CPU:
  1020. case NATIVE_WINDOW_API_MEDIA:
  1021. case NATIVE_WINDOW_API_CAMERA:
  1022. mCore->mConnectedApi = api;
  1023. output->width = mCore->mDefaultWidth;
  1024. output->height = mCore->mDefaultHeight;
  1025. output->transformHint = mCore->mTransformHint;
  1026. output->numPendingBuffers =
  1027. static_cast<uint32_t>(mCore->mQueue.size());
  1028. output->nextFrameNumber = mCore->mFrameCounter + 1;
  1029. output->bufferReplaced = false;
  1030. if (listener != nullptr) {
  1031. // Set up a death notification so that we can disconnect
  1032. // automatically if the remote producer dies
  1033. if (IInterface::asBinder(listener)->remoteBinder() != nullptr) {
  1034. status = IInterface::asBinder(listener)->linkToDeath(
  1035. static_cast<IBinder::DeathRecipient*>(this));
  1036. if (status != NO_ERROR) {
  1037. BQ_LOGE("connect: linkToDeath failed: %s (%d)",
  1038. strerror(-status), status);
  1039. }
  1040. mCore->mLinkedToDeath = listener;
  1041. }
  1042. mCore->mConnectedProducerListener = listener;
  1043. mCore->mBufferReleasedCbEnabled = listener->needsReleaseNotify();
  1044. }
  1045. break;
  1046. default:
  1047. BQ_LOGE("connect: unknown API %d", api);
  1048. status = BAD_VALUE;
  1049. break;
  1050. }
  1051. mCore->mConnectedPid = BufferQueueThreadState::getCallingPid();
  1052. mCore->mBufferHasBeenQueued = false;
  1053. mCore->mDequeueBufferCannotBlock = false;
  1054. mCore->mQueueBufferCanDrop = false;
  1055. mCore->mLegacyBufferDrop = true;
  1056. if (mCore->mConsumerControlledByApp && producerControlledByApp) {
  1057. mCore->mDequeueBufferCannotBlock = mDequeueTimeout < 0;
  1058. mCore->mQueueBufferCanDrop = mDequeueTimeout <= 0;
  1059. }
  1060. mCore->mAllowAllocation = true;
  1061. VALIDATE_CONSISTENCY();
  1062. return status;
  1063. }
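/*
 * Minimal connect/disconnect sequence from the producer side (sketch; the
 * listener is assumed to be some no-op IProducerListener implementation):
 *
 *   IGraphicBufferProducer::QueueBufferOutput output;
 *   status_t err = producer->connect(listener, NATIVE_WINDOW_API_CPU,
 *           true, &output);  // true = producerControlledByApp
 *   // ... dequeue/queue frames ...
 *   producer->disconnect(NATIVE_WINDOW_API_CPU);
 */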
  1064. status_t BufferQueueProducer::disconnect(int api, DisconnectMode mode) {
  1065. ATRACE_CALL();
  1066. BQ_LOGV("disconnect: api %d", api);
  1067. int status = NO_ERROR;
  1068. sp<IConsumerListener> listener;
  1069. { // Autolock scope
  1070. std::unique_lock<std::mutex> lock(mCore->mMutex);
  1071. if (mode == DisconnectMode::AllLocal) {
  1072. if (BufferQueueThreadState::getCallingPid() != mCore->mConnectedPid) {
  1073. return NO_ERROR;
  1074. }
  1075. api = BufferQueueCore::CURRENTLY_CONNECTED_API;
  1076. }
  1077. mCore->waitWhileAllocatingLocked(lock);
  1078. if (mCore->mIsAbandoned) {
  1079. // It's not really an error to disconnect after the surface has
  1080. // been abandoned; it should just be a no-op.
  1081. return NO_ERROR;
  1082. }
  1083. if (api == BufferQueueCore::CURRENTLY_CONNECTED_API) {
  1084. if (mCore->mConnectedApi == NATIVE_WINDOW_API_MEDIA) {
  1085. ALOGD("About to force-disconnect API_MEDIA, mode=%d", mode);
  1086. }
  1087. api = mCore->mConnectedApi;
  1088. // If we're asked to disconnect the currently connected api but
  1089. // nobody is connected, it's not really an error.
  1090. if (api == BufferQueueCore::NO_CONNECTED_API) {
  1091. return NO_ERROR;
  1092. }
  1093. }
  1094. switch (api) {
  1095. case NATIVE_WINDOW_API_EGL:
  1096. case NATIVE_WINDOW_API_CPU:
  1097. case NATIVE_WINDOW_API_MEDIA:
  1098. case NATIVE_WINDOW_API_CAMERA:
  1099. if (mCore->mConnectedApi == api) {
  1100. mCore->freeAllBuffersLocked();
  1101. // Remove our death notification callback if we have one
  1102. if (mCore->mLinkedToDeath != nullptr) {
  1103. sp<IBinder> token =
  1104. IInterface::asBinder(mCore->mLinkedToDeath);
  1105. // This can fail if we're here because of the death
  1106. // notification, but we just ignore it
  1107. token->unlinkToDeath(
  1108. static_cast<IBinder::DeathRecipient*>(this));
  1109. }
  1110. mCore->mSharedBufferSlot =
  1111. BufferQueueCore::INVALID_BUFFER_SLOT;
  1112. mCore->mLinkedToDeath = nullptr;
  1113. mCore->mConnectedProducerListener = nullptr;
  1114. mCore->mConnectedApi = BufferQueueCore::NO_CONNECTED_API;
  1115. mCore->mConnectedPid = -1;
  1116. mCore->mSidebandStream.clear();
  1117. mCore->mDequeueCondition.notify_all();
  1118. listener = mCore->mConsumerListener;
  1119. } else if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
  1120. BQ_LOGE("disconnect: not connected (req=%d)", api);
  1121. status = NO_INIT;
  1122. } else {
  1123. BQ_LOGE("disconnect: still connected to another API "
  1124. "(cur=%d req=%d)", mCore->mConnectedApi, api);
  1125. status = BAD_VALUE;
  1126. }
  1127. break;
  1128. default:
  1129. BQ_LOGE("disconnect: unknown API %d", api);
  1130. status = BAD_VALUE;
  1131. break;
  1132. }
  1133. } // Autolock scope
  1134. // Call back without lock held
  1135. if (listener != nullptr) {
  1136. listener->onBuffersReleased();
  1137. listener->onDisconnect();
  1138. }
  1139. return status;
  1140. }
  1141. status_t BufferQueueProducer::setSidebandStream(const sp<NativeHandle>& stream) {
  1142. sp<IConsumerListener> listener;
  1143. { // Autolock scope
  1144. std::lock_guard<std::mutex> _l(mCore->mMutex);
  1145. mCore->mSidebandStream = stream;
  1146. listener = mCore->mConsumerListener;
  1147. } // Autolock scope
  1148. if (listener != nullptr) {
  1149. listener->onSidebandStreamChanged();
  1150. }
  1151. return NO_ERROR;
  1152. }
  1153. void BufferQueueProducer::allocateBuffers(uint32_t width, uint32_t height,
  1154. PixelFormat format, uint64_t usage) {
  1155. ATRACE_CALL();
  1156. while (true) {
  1157. size_t newBufferCount = 0;
  1158. uint32_t allocWidth = 0;
  1159. uint32_t allocHeight = 0;
  1160. PixelFormat allocFormat = PIXEL_FORMAT_UNKNOWN;
  1161. uint64_t allocUsage = 0;
  1162. std::string allocName;
  1163. { // Autolock scope
  1164. std::unique_lock<std::mutex> lock(mCore->mMutex);
  1165. mCore->waitWhileAllocatingLocked(lock);
  1166. if (!mCore->mAllowAllocation) {
  1167. BQ_LOGE("allocateBuffers: allocation is not allowed for this "
  1168. "BufferQueue");
  1169. return;
  1170. }
  1171. // Only allocate one buffer at a time to reduce risks of overlapping an allocation from
  1172. // both allocateBuffers and dequeueBuffer.
  1173. newBufferCount = mCore->mFreeSlots.empty() ? 0 : 1;
  1174. if (newBufferCount == 0) {
  1175. return;
  1176. }
  1177. allocWidth = width > 0 ? width : mCore->mDefaultWidth;
  1178. allocHeight = height > 0 ? height : mCore->mDefaultHeight;
  1179. allocFormat = format != 0 ? format : mCore->mDefaultBufferFormat;
  1180. allocUsage = usage | mCore->mConsumerUsageBits;
  1181. allocName.assign(mCore->mConsumerName.string(), mCore->mConsumerName.size());
  1182. mCore->mIsAllocating = true;
  1183. } // Autolock scope
  1184. Vector<sp<GraphicBuffer>> buffers;
  1185. for (size_t i = 0; i < newBufferCount; ++i) {
  1186. sp<GraphicBuffer> graphicBuffer = new GraphicBuffer(
  1187. allocWidth, allocHeight, allocFormat, BQ_LAYER_COUNT,
  1188. allocUsage, allocName);
  1189. status_t result = graphicBuffer->initCheck();
  1190. if (result != NO_ERROR) {
  1191. BQ_LOGE("allocateBuffers: failed to allocate buffer (%u x %u, format"
  1192. " %u, usage %#" PRIx64 ")", width, height, format, usage);
  1193. std::lock_guard<std::mutex> lock(mCore->mMutex);
  1194. mCore->mIsAllocating = false;
  1195. mCore->mIsAllocatingCondition.notify_all();
  1196. return;
  1197. }
  1198. buffers.push_back(graphicBuffer);
  1199. }
  1200. { // Autolock scope
  1201. std::unique_lock<std::mutex> lock(mCore->mMutex);
  1202. uint32_t checkWidth = width > 0 ? width : mCore->mDefaultWidth;
  1203. uint32_t checkHeight = height > 0 ? height : mCore->mDefaultHeight;
  1204. PixelFormat checkFormat = format != 0 ?
  1205. format : mCore->mDefaultBufferFormat;
  1206. uint64_t checkUsage = usage | mCore->mConsumerUsageBits;
  1207. if (checkWidth != allocWidth || checkHeight != allocHeight ||
  1208. checkFormat != allocFormat || checkUsage != allocUsage) {
  1209. // Something changed while we released the lock. Retry.
  1210. BQ_LOGV("allocateBuffers: size/format/usage changed while allocating. Retrying.");
  1211. mCore->mIsAllocating = false;
  1212. mCore->mIsAllocatingCondition.notify_all();
  1213. continue;
  1214. }
  1215. for (size_t i = 0; i < newBufferCount; ++i) {
  1216. if (mCore->mFreeSlots.empty()) {
  1217. BQ_LOGV("allocateBuffers: a slot was occupied while "
  1218. "allocating. Dropping allocated buffer.");
  1219. continue;
  1220. }
  1221. auto slot = mCore->mFreeSlots.begin();
  1222. mCore->clearBufferSlotLocked(*slot); // Clean up the slot first
  1223. mSlots[*slot].mGraphicBuffer = buffers[i];
  1224. mSlots[*slot].mFence = Fence::NO_FENCE;
1225. // This slot came from the free-slots list; now that it holds a
1226. // buffer, move it to the free-buffers list instead.
  1227. mCore->mFreeBuffers.push_front(*slot);
  1228. BQ_LOGV("allocateBuffers: allocated a new buffer in slot %d",
  1229. *slot);
  1230. // Make sure the erase is done after all uses of the slot
  1231. // iterator since it will be invalid after this point.
  1232. mCore->mFreeSlots.erase(slot);
  1233. }
  1234. mCore->mIsAllocating = false;
  1235. mCore->mIsAllocatingCondition.notify_all();
  1236. VALIDATE_CONSISTENCY();
1237. // If dequeueBuffer is waiting for an allocation, release the lock (by
1238. // waiting on the condition) until it is no longer waiting, so it can use the buffer we just allocated.
  1239. while (mDequeueWaitingForAllocation) {
  1240. mDequeueWaitingForAllocationCondition.wait(lock);
  1241. }
  1242. } // Autolock scope
  1243. }
  1244. }
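/*
 * allocateBuffers() is a best-effort warm-up, typically reached via
 * Surface::allocateBuffers(): it pre-populates free slots so the first
 * dequeueBuffer() after a connect does not pay the allocation cost, and it
 * quietly stops if allocation is disallowed, the requested parameters change
 * mid-flight, or no free slots remain.
 */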
  1245. status_t BufferQueueProducer::allowAllocation(bool allow) {
  1246. ATRACE_CALL();
  1247. BQ_LOGV("allowAllocation: %s", allow ? "true" : "false");
  1248. std::lock_guard<std::mutex> lock(mCore->mMutex);
  1249. mCore->mAllowAllocation = allow;
  1250. return NO_ERROR;
  1251. }
  1252. status_t BufferQueueProducer::setGenerationNumber(uint32_t generationNumber) {
  1253. ATRACE_CALL();
  1254. BQ_LOGV("setGenerationNumber: %u", generationNumber);
  1255. std::lock_guard<std::mutex> lock(mCore->mMutex);
  1256. mCore->mGenerationNumber = generationNumber;
  1257. return NO_ERROR;
  1258. }
  1259. String8 BufferQueueProducer::getConsumerName() const {
  1260. ATRACE_CALL();
  1261. std::lock_guard<std::mutex> lock(mCore->mMutex);
  1262. BQ_LOGV("getConsumerName: %s", mConsumerName.string());
  1263. return mConsumerName;
  1264. }
  1265. status_t BufferQueueProducer::setSharedBufferMode(bool sharedBufferMode) {
  1266. ATRACE_CALL();
  1267. BQ_LOGV("setSharedBufferMode: %d", sharedBufferMode);
  1268. std::lock_guard<std::mutex> lock(mCore->mMutex);
  1269. if (!sharedBufferMode) {
  1270. mCore->mSharedBufferSlot = BufferQueueCore::INVALID_BUFFER_SLOT;
  1271. }
  1272. mCore->mSharedBufferMode = sharedBufferMode;
  1273. return NO_ERROR;
  1274. }
  1275. status_t BufferQueueProducer::setAutoRefresh(bool autoRefresh) {
  1276. ATRACE_CALL();
  1277. BQ_LOGV("setAutoRefresh: %d", autoRefresh);
  1278. std::lock_guard<std::mutex> lock(mCore->mMutex);
  1279. mCore->mAutoRefresh = autoRefresh;
  1280. return NO_ERROR;
  1281. }
  1282. status_t BufferQueueProducer::setDequeueTimeout(nsecs_t timeout) {
  1283. ATRACE_CALL();
  1284. BQ_LOGV("setDequeueTimeout: %" PRId64, timeout);
  1285. std::lock_guard<std::mutex> lock(mCore->mMutex);
  1286. bool dequeueBufferCannotBlock =
  1287. timeout >= 0 ? false : mCore->mDequeueBufferCannotBlock;
  1288. int delta = mCore->getMaxBufferCountLocked(mCore->mAsyncMode, dequeueBufferCannotBlock,
  1289. mCore->mMaxBufferCount) - mCore->getMaxBufferCountLocked();
  1290. if (!mCore->adjustAvailableSlotsLocked(delta)) {
  1291. BQ_LOGE("setDequeueTimeout: BufferQueue failed to adjust the number of "
  1292. "available slots. Delta = %d", delta);
  1293. return BAD_VALUE;
  1294. }
  1295. mDequeueTimeout = timeout;
  1296. mCore->mDequeueBufferCannotBlock = dequeueBufferCannotBlock;
  1297. if (timeout > 0) {
  1298. mCore->mQueueBufferCanDrop = false;
  1299. }
  1300. VALIDATE_CONSISTENCY();
  1301. return NO_ERROR;
  1302. }
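/*
 * Timeout semantics in one place (restating the code above): a negative value
 * keeps the historical behaviour, where dequeueBuffer() may block indefinitely
 * or return WOULD_BLOCK in non-blocking mode, while a non-negative value makes
 * dequeueBuffer() return TIMED_OUT once roughly that many nanoseconds elapse.
 * For example, setDequeueTimeout(ms2ns(16)) bounds a producer's wait to about
 * one 60 Hz frame (ms2ns() from utils/Timers.h is assumed available).
 */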
  1303. status_t BufferQueueProducer::setLegacyBufferDrop(bool drop) {
  1304. ATRACE_CALL();
  1305. BQ_LOGV("setLegacyBufferDrop: drop = %d", drop);
  1306. std::lock_guard<std::mutex> lock(mCore->mMutex);
  1307. mCore->mLegacyBufferDrop = drop;
  1308. return NO_ERROR;
  1309. }
  1310. status_t BufferQueueProducer::getLastQueuedBuffer(sp<GraphicBuffer>* outBuffer,
  1311. sp<Fence>* outFence, float outTransformMatrix[16]) {
  1312. ATRACE_CALL();
  1313. BQ_LOGV("getLastQueuedBuffer");
  1314. std::lock_guard<std::mutex> lock(mCore->mMutex);
  1315. if (mCore->mLastQueuedSlot == BufferItem::INVALID_BUFFER_SLOT) {
  1316. *outBuffer = nullptr;
  1317. *outFence = Fence::NO_FENCE;
  1318. return NO_ERROR;
  1319. }
  1320. *outBuffer = mSlots[mCore->mLastQueuedSlot].mGraphicBuffer;
  1321. *outFence = mLastQueueBufferFence;
  1322. // Currently only SurfaceFlinger internally ever changes
  1323. // GLConsumer's filtering mode, so we just use 'true' here as
  1324. // this is slightly specialized for the current client of this API,
  1325. // which does want filtering.
  1326. GLConsumer::computeTransformMatrix(outTransformMatrix,
  1327. mSlots[mCore->mLastQueuedSlot].mGraphicBuffer, mLastQueuedCrop,
  1328. mLastQueuedTransform, true /* filter */);
  1329. return NO_ERROR;
  1330. }
  1331. void BufferQueueProducer::getFrameTimestamps(FrameEventHistoryDelta* outDelta) {
  1332. addAndGetFrameTimestamps(nullptr, outDelta);
  1333. }
  1334. void BufferQueueProducer::addAndGetFrameTimestamps(
  1335. const NewFrameEventsEntry* newTimestamps,
  1336. FrameEventHistoryDelta* outDelta) {
  1337. if (newTimestamps == nullptr && outDelta == nullptr) {
  1338. return;
  1339. }
  1340. ATRACE_CALL();
  1341. BQ_LOGV("addAndGetFrameTimestamps");
  1342. sp<IConsumerListener> listener;
  1343. {
  1344. std::lock_guard<std::mutex> lock(mCore->mMutex);
  1345. listener = mCore->mConsumerListener;
  1346. }
  1347. if (listener != nullptr) {
  1348. listener->addAndGetFrameTimestamps(newTimestamps, outDelta);
  1349. }
  1350. }
  1351. void BufferQueueProducer::binderDied(const wp<android::IBinder>& /* who */) {
  1352. // If we're here, it means that a producer we were connected to died.
  1353. // We're guaranteed that we are still connected to it because we remove
  1354. // this callback upon disconnect. It's therefore safe to read mConnectedApi
  1355. // without synchronization here.
  1356. int api = mCore->mConnectedApi;
  1357. disconnect(api);
  1358. }
  1359. status_t BufferQueueProducer::getUniqueId(uint64_t* outId) const {
  1360. BQ_LOGV("getUniqueId");
  1361. *outId = mCore->mUniqueId;
  1362. return NO_ERROR;
  1363. }
  1364. status_t BufferQueueProducer::getConsumerUsage(uint64_t* outUsage) const {
  1365. BQ_LOGV("getConsumerUsage");
  1366. std::lock_guard<std::mutex> lock(mCore->mMutex);
  1367. *outUsage = mCore->mConsumerUsageBits;
  1368. return NO_ERROR;
  1369. }
  1370. } // namespace android