buffer_hub_queue-test.cpp

#include <base/logging.h>
#include <binder/Parcel.h>
#include <dvr/dvr_api.h>
#include <private/dvr/buffer_hub_queue_client.h>
#include <private/dvr/consumer_buffer.h>
#include <private/dvr/producer_buffer.h>

#include <gtest/gtest.h>
#include <poll.h>
#include <sys/eventfd.h>

#include <vector>

// Enable/disable debug logging.
#define TRACE 0

namespace android {
namespace dvr {

using pdx::LocalChannelHandle;
using pdx::LocalHandle;

namespace {

constexpr uint32_t kBufferWidth = 100;
constexpr uint32_t kBufferHeight = 1;
constexpr uint32_t kBufferLayerCount = 1;
constexpr uint32_t kBufferFormat = HAL_PIXEL_FORMAT_BLOB;
constexpr uint64_t kBufferUsage = GRALLOC_USAGE_SW_READ_RARELY;
constexpr int kTimeoutMs = 100;
constexpr int kNoTimeout = 0;
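
// A rough sketch of the buffer state cycle these tests exercise, using the
// terminology of the comments in the tests themselves:
//   producer_queue_->Dequeue()  - the producer gains a buffer
//   producer->PostAsync()       - the posted buffer becomes visible to
//                                 consumers
//   consumer_queue_->Dequeue()  - the consumer acquires the oldest posted
//                                 buffer
//   consumer->ReleaseAsync()    - the released buffer returns to the producer
//                                 queue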

class BufferHubQueueTest : public ::testing::Test {
 public:
  bool CreateProducerQueue(const ProducerQueueConfig& config,
                           const UsagePolicy& usage) {
    producer_queue_ = ProducerQueue::Create(config, usage);
    return producer_queue_ != nullptr;
  }

  bool CreateConsumerQueue() {
    if (producer_queue_) {
      consumer_queue_ = producer_queue_->CreateConsumerQueue();
      return consumer_queue_ != nullptr;
    } else {
      return false;
    }
  }

  bool CreateQueues(const ProducerQueueConfig& config,
                    const UsagePolicy& usage) {
    return CreateProducerQueue(config, usage) && CreateConsumerQueue();
  }

  void AllocateBuffer(size_t* slot_out = nullptr) {
    // Create producer buffer.
    auto status = producer_queue_->AllocateBuffer(kBufferWidth, kBufferHeight,
                                                  kBufferLayerCount,
                                                  kBufferFormat, kBufferUsage);
    ASSERT_TRUE(status.ok());
    size_t slot = status.take();
    if (slot_out)
      *slot_out = slot;
  }

  // Polls the queue's event fd until it becomes readable or |timeout_ms|
  // expires, then handles the pending queue events once.
  bool WaitAndHandleOnce(BufferHubQueue* queue, int timeout_ms) {
    pollfd pfd{queue->queue_fd(), POLLIN, 0};
    int ret;
    do {
      ret = poll(&pfd, 1, timeout_ms);
    } while (ret == -1 && errno == EINTR);

    if (ret < 0) {
      ALOGW("Failed to poll queue %d's event fd, error: %s.", queue->id(),
            strerror(errno));
      return false;
    } else if (ret == 0) {
      return false;
    }
    return queue->HandleQueueEvents();
  }

 protected:
  ProducerQueueConfigBuilder config_builder_;
  std::unique_ptr<ProducerQueue> producer_queue_;
  std::unique_ptr<ConsumerQueue> consumer_queue_;
};

TEST_F(BufferHubQueueTest, TestDequeue) {
  const int64_t nb_dequeue_times = 16;

  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));

  // Allocate only one buffer.
  AllocateBuffer();

  // But dequeue multiple times.
  for (int64_t i = 0; i < nb_dequeue_times; i++) {
    size_t slot;
    LocalHandle fence;
    DvrNativeBufferMetadata mi, mo;

    // Producer gains a buffer.
    auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    EXPECT_TRUE(p1_status.ok());
    auto p1 = p1_status.take();
    ASSERT_NE(p1, nullptr);

    // Producer posts the buffer.
    mi.index = i;
    EXPECT_EQ(p1->PostAsync(&mi, LocalHandle()), 0);

    // Consumer acquires a buffer.
    auto c1_status = consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    EXPECT_TRUE(c1_status.ok()) << c1_status.GetErrorMessage();
    auto c1 = c1_status.take();
    ASSERT_NE(c1, nullptr);
    EXPECT_EQ(mi.index, i);
    EXPECT_EQ(mo.index, i);

    // Consumer releases the buffer.
    EXPECT_EQ(c1->ReleaseAsync(&mi, LocalHandle()), 0);
  }
}

TEST_F(BufferHubQueueTest,
       TestDequeuePostedBufferIfNoAvailableReleasedBuffer_withConsumerBuffer) {
  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));

  // Allocate 3 buffers to use.
  const size_t test_queue_capacity = 3;
  for (int64_t i = 0; i < test_queue_capacity; i++) {
    AllocateBuffer();
  }
  EXPECT_EQ(producer_queue_->capacity(), test_queue_capacity);

  size_t producer_slot, consumer_slot;
  LocalHandle fence;
  DvrNativeBufferMetadata mi, mo;

  // Producer posts 2 buffers and remembers their posted sequence.
  std::deque<size_t> posted_slots;
  for (int64_t i = 0; i < 2; i++) {
    auto p1_status =
        producer_queue_->Dequeue(kTimeoutMs, &producer_slot, &mo, &fence, true);
    EXPECT_TRUE(p1_status.ok());
    auto p1 = p1_status.take();
    ASSERT_NE(p1, nullptr);

    // Producer should not be gaining a posted buffer when there are still
    // available buffers to gain.
    auto found_iter =
        std::find(posted_slots.begin(), posted_slots.end(), producer_slot);
    EXPECT_EQ(found_iter, posted_slots.end());
    posted_slots.push_back(producer_slot);

    // Producer posts the buffer.
    mi.index = i;
    EXPECT_EQ(0, p1->PostAsync(&mi, LocalHandle()));
  }

  // Consumer acquires one buffer.
  auto c1_status =
      consumer_queue_->Dequeue(kTimeoutMs, &consumer_slot, &mo, &fence);
  EXPECT_TRUE(c1_status.ok());
  auto c1 = c1_status.take();
  ASSERT_NE(c1, nullptr);
  // Consumer should get the oldest posted buffer; posted_slots[0] should be
  // in acquired state now.
  EXPECT_EQ(mo.index, 0);

  // Consumer releases the buffer; posted_slots[0] should be in released state
  // now.
  EXPECT_EQ(c1->ReleaseAsync(&mi, LocalHandle()), 0);

  // Producer gains and posts 2 buffers.
  for (int64_t i = 0; i < 2; i++) {
    auto p1_status =
        producer_queue_->Dequeue(kTimeoutMs, &producer_slot, &mo, &fence, true);
    EXPECT_TRUE(p1_status.ok());
    auto p1 = p1_status.take();
    ASSERT_NE(p1, nullptr);

    // The gained buffer should be the one in released state or the one that
    // hasn't been used yet.
    EXPECT_NE(posted_slots[1], producer_slot);

    mi.index = i + 2;
    EXPECT_EQ(0, p1->PostAsync(&mi, LocalHandle()));
  }

  // Producer gains a buffer.
  auto p1_status =
      producer_queue_->Dequeue(kTimeoutMs, &producer_slot, &mo, &fence, true);
  EXPECT_TRUE(p1_status.ok());
  auto p1 = p1_status.take();
  ASSERT_NE(p1, nullptr);

  // The gained buffer should be the oldest posted buffer.
  EXPECT_EQ(posted_slots[1], producer_slot);

  // Producer posts the buffer.
  mi.index = 4;
  EXPECT_EQ(0, p1->PostAsync(&mi, LocalHandle()));
}

TEST_F(BufferHubQueueTest,
       TestDequeuePostedBufferIfNoAvailableReleasedBuffer_noConsumerBuffer) {
  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));

  // Allocate 4 buffers to use.
  const size_t test_queue_capacity = 4;
  for (int64_t i = 0; i < test_queue_capacity; i++) {
    AllocateBuffer();
  }
  EXPECT_EQ(producer_queue_->capacity(), test_queue_capacity);

  // Post all allowed buffers and remember their posted sequence.
  std::deque<size_t> posted_slots;
  for (int64_t i = 0; i < test_queue_capacity; i++) {
    size_t slot;
    LocalHandle fence;
    DvrNativeBufferMetadata mi, mo;

    // Producer gains a buffer.
    auto p1_status =
        producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence, true);
    EXPECT_TRUE(p1_status.ok());
    auto p1 = p1_status.take();
    ASSERT_NE(p1, nullptr);

    // Producer should not be gaining a posted buffer when there are still
    // available buffers to gain.
    auto found_iter =
        std::find(posted_slots.begin(), posted_slots.end(), slot);
    EXPECT_EQ(found_iter, posted_slots.end());
    posted_slots.push_back(slot);

    // Producer posts the buffer.
    mi.index = i;
    EXPECT_EQ(p1->PostAsync(&mi, LocalHandle()), 0);
  }

  // Gain posted buffers in sequence.
  const int64_t nb_dequeue_all_times = 2;
  for (int j = 0; j < nb_dequeue_all_times; ++j) {
    for (int i = 0; i < test_queue_capacity; ++i) {
      size_t slot;
      LocalHandle fence;
      DvrNativeBufferMetadata mi, mo;

      // Producer gains a buffer.
      auto p1_status =
          producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence, true);
      EXPECT_TRUE(p1_status.ok());
      auto p1 = p1_status.take();
      ASSERT_NE(p1, nullptr);

      // The gained buffer should be the oldest posted buffer.
      EXPECT_EQ(posted_slots[i], slot);

      // Producer posts the buffer.
      mi.index = i + test_queue_capacity * (j + 1);
      EXPECT_EQ(p1->PostAsync(&mi, LocalHandle()), 0);
    }
  }
}

TEST_F(BufferHubQueueTest, TestProducerConsumer) {
  const size_t kBufferCount = 16;
  size_t slot;
  DvrNativeBufferMetadata mi, mo;
  LocalHandle fence;

  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));

  for (size_t i = 0; i < kBufferCount; i++) {
    AllocateBuffer();

    // Producer queue has all the available buffers on initialization.
    ASSERT_EQ(producer_queue_->count(), i + 1);
    ASSERT_EQ(producer_queue_->capacity(), i + 1);

    // Consumer queue has no available buffer on initialization.
    ASSERT_EQ(consumer_queue_->count(), 0U);

    // Consumer queue does not import buffers until a dequeue is issued.
    ASSERT_EQ(consumer_queue_->capacity(), i);

    // Dequeue returns timeout since no buffer is ready for the consumer, but
    // this implicitly triggers buffer import and bumps up |capacity|.
    auto status = consumer_queue_->Dequeue(kNoTimeout, &slot, &mo, &fence);
    ASSERT_FALSE(status.ok());
    ASSERT_EQ(ETIMEDOUT, status.error());
    ASSERT_EQ(consumer_queue_->capacity(), i + 1);
  }

  // Use eventfd as a stand-in for a fence.
  LocalHandle post_fence(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK));

  for (size_t i = 0; i < kBufferCount; i++) {
    // The first time there is no buffer available to dequeue.
    auto consumer_status =
        consumer_queue_->Dequeue(kNoTimeout, &slot, &mo, &fence);
    ASSERT_FALSE(consumer_status.ok());
    ASSERT_EQ(consumer_status.error(), ETIMEDOUT);

    // Make sure the producer buffer is POSTED so that it is ready to Acquire
    // in the consumer's Dequeue() function.
    auto producer_status =
        producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    ASSERT_TRUE(producer_status.ok());
    auto producer = producer_status.take();
    ASSERT_NE(nullptr, producer);

    mi.index = static_cast<int64_t>(i);
    ASSERT_EQ(producer->PostAsync(&mi, post_fence), 0);

    // The second time, just the POSTED buffer should be dequeued.
    consumer_status = consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    ASSERT_TRUE(consumer_status.ok());
    EXPECT_TRUE(fence.IsValid());

    auto consumer = consumer_status.take();
    ASSERT_NE(nullptr, consumer);
    ASSERT_EQ(mi.index, mo.index);
  }
}

TEST_F(BufferHubQueueTest, TestInsertBuffer) {
  ASSERT_TRUE(CreateProducerQueue(config_builder_.Build(), UsagePolicy{}));

  consumer_queue_ = producer_queue_->CreateConsumerQueue();
  ASSERT_TRUE(consumer_queue_ != nullptr);
  EXPECT_EQ(producer_queue_->capacity(), 0);
  EXPECT_EQ(consumer_queue_->capacity(), 0);

  std::shared_ptr<ProducerBuffer> p1 = ProducerBuffer::Create(
      kBufferWidth, kBufferHeight, kBufferFormat, kBufferUsage, 0);
  ASSERT_TRUE(p1 != nullptr);
  ASSERT_EQ(p1->GainAsync(), 0);

  // Inserting a posted buffer will fail.
  DvrNativeBufferMetadata meta;
  EXPECT_EQ(p1->PostAsync(&meta, LocalHandle()), 0);
  auto status_or_slot = producer_queue_->InsertBuffer(p1);
  EXPECT_FALSE(status_or_slot.ok());
  EXPECT_EQ(status_or_slot.error(), EINVAL);

  // Inserting a gained buffer will succeed.
  std::shared_ptr<ProducerBuffer> p2 = ProducerBuffer::Create(
      kBufferWidth, kBufferHeight, kBufferFormat, kBufferUsage);
  ASSERT_TRUE(p2 != nullptr);
  ASSERT_EQ(p2->GainAsync(), 0);
  status_or_slot = producer_queue_->InsertBuffer(p2);
  EXPECT_TRUE(status_or_slot.ok()) << status_or_slot.GetErrorMessage();
  // This is the first buffer inserted; it should take slot 0.
  size_t slot = status_or_slot.get();
  EXPECT_EQ(slot, 0);

  // Wait and expect the consumer to pick up the newly inserted buffer.
  WaitAndHandleOnce(consumer_queue_.get(), kTimeoutMs);
  EXPECT_EQ(consumer_queue_->capacity(), 1ULL);
}

TEST_F(BufferHubQueueTest, TestRemoveBuffer) {
  ASSERT_TRUE(CreateProducerQueue(config_builder_.Build(), UsagePolicy{}));
  DvrNativeBufferMetadata mo;

  // Allocate buffers.
  const size_t kBufferCount = 4u;
  for (size_t i = 0; i < kBufferCount; i++) {
    AllocateBuffer();
  }
  ASSERT_EQ(kBufferCount, producer_queue_->count());
  ASSERT_EQ(kBufferCount, producer_queue_->capacity());

  consumer_queue_ = producer_queue_->CreateConsumerQueue();
  ASSERT_NE(nullptr, consumer_queue_);

  // Check that buffers are correctly imported on construction.
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_EQ(0u, consumer_queue_->count());

  // Dequeue all the buffers and keep track of them in an array. This prevents
  // the producer queue ring buffer ref counts from interfering with the tests.
  struct Entry {
    std::shared_ptr<ProducerBuffer> buffer;
    LocalHandle fence;
    size_t slot;
  };
  std::array<Entry, kBufferCount> buffers;

  for (size_t i = 0; i < kBufferCount; i++) {
    Entry* entry = &buffers[i];
    auto producer_status =
        producer_queue_->Dequeue(kTimeoutMs, &entry->slot, &mo, &entry->fence);
    ASSERT_TRUE(producer_status.ok());
    entry->buffer = producer_status.take();
    ASSERT_NE(nullptr, entry->buffer);
  }

  // Remove a buffer and make sure both queues reflect the change.
  ASSERT_TRUE(producer_queue_->RemoveBuffer(buffers[0].slot));
  EXPECT_EQ(kBufferCount - 1, producer_queue_->capacity());

  // As long as the removed buffer is still alive, the consumer queue won't
  // know it's gone.
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());

  // Release the removed buffer.
  buffers[0].buffer = nullptr;

  // Now the consumer queue should know it's gone.
  EXPECT_FALSE(WaitAndHandleOnce(consumer_queue_.get(), kTimeoutMs));
  ASSERT_EQ(kBufferCount - 1, consumer_queue_->capacity());

  // Allocate a new buffer. This should take the first empty slot.
  size_t slot;
  AllocateBuffer(&slot);
  ALOGE_IF(TRACE, "ALLOCATE %zu", slot);
  EXPECT_EQ(buffers[0].slot, slot);
  EXPECT_EQ(kBufferCount, producer_queue_->capacity());

  // The consumer queue should pick up the new buffer.
  EXPECT_EQ(kBufferCount - 1, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());

  // Remove and allocate a buffer.
  ASSERT_TRUE(producer_queue_->RemoveBuffer(buffers[1].slot));
  EXPECT_EQ(kBufferCount - 1, producer_queue_->capacity());
  buffers[1].buffer = nullptr;

  AllocateBuffer(&slot);
  ALOGE_IF(TRACE, "ALLOCATE %zu", slot);
  EXPECT_EQ(buffers[1].slot, slot);
  EXPECT_EQ(kBufferCount, producer_queue_->capacity());

  // The consumer queue should pick up the new buffer but the capacity
  // shouldn't change.
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());

  // Remove and allocate a buffer, but don't free the buffer right away.
  ASSERT_TRUE(producer_queue_->RemoveBuffer(buffers[2].slot));
  EXPECT_EQ(kBufferCount - 1, producer_queue_->capacity());

  AllocateBuffer(&slot);
  ALOGE_IF(TRACE, "ALLOCATE %zu", slot);
  EXPECT_EQ(buffers[2].slot, slot);
  EXPECT_EQ(kBufferCount, producer_queue_->capacity());

  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());

  // Release the producer buffer to trigger a POLLHUP event for an already
  // removed buffer.
  buffers[2].buffer = nullptr;
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
}

TEST_F(BufferHubQueueTest, TestMultipleConsumers) {
  // ProducerQueueConfigBuilder doesn't set Metadata{size}, which means there
  // is no metadata associated with this BufferQueue's buffers.
  ASSERT_TRUE(CreateProducerQueue(config_builder_.Build(), UsagePolicy{}));

  // Allocate buffers.
  const size_t kBufferCount = 4u;
  for (size_t i = 0; i < kBufferCount; i++) {
    AllocateBuffer();
  }
  ASSERT_EQ(kBufferCount, producer_queue_->count());

  // Build a silent consumer queue to test multi-consumer queue features.
  auto silent_queue = producer_queue_->CreateSilentConsumerQueue();
  ASSERT_NE(nullptr, silent_queue);

  // Check that the silent queue doesn't import buffers on creation.
  EXPECT_EQ(silent_queue->capacity(), 0U);

  // Dequeue and post a buffer.
  size_t slot;
  LocalHandle fence;
  DvrNativeBufferMetadata mi, mo;
  auto producer_status =
      producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  EXPECT_TRUE(producer_status.ok());
  auto producer_buffer = producer_status.take();
  ASSERT_NE(producer_buffer, nullptr);
  EXPECT_EQ(producer_buffer->PostAsync(&mi, {}), 0);

  // After the post, check the number of remaining available buffers.
  EXPECT_EQ(producer_queue_->count(), kBufferCount - 1);

  // Currently we expect no buffer to be available prior to calling
  // WaitForBuffers/HandleQueueEvents.
  // TODO(eieio): Note this behavior may change in the future.
  EXPECT_EQ(silent_queue->count(), 0U);
  EXPECT_FALSE(silent_queue->HandleQueueEvents());
  EXPECT_EQ(silent_queue->count(), 0U);

  // Build a new consumer queue to test multi-consumer queue features.
  consumer_queue_ = silent_queue->CreateConsumerQueue();
  ASSERT_NE(consumer_queue_, nullptr);

  // Check that buffers are correctly imported on construction.
  EXPECT_EQ(consumer_queue_->capacity(), kBufferCount);
  // Buffers are only imported; their availability is not checked until the
  // first call to Dequeue().
  EXPECT_EQ(consumer_queue_->count(), 0U);

  // Reclaim released/ignored buffers.
  EXPECT_EQ(producer_queue_->count(), kBufferCount - 1);

  usleep(10000);
  WaitAndHandleOnce(producer_queue_.get(), kTimeoutMs);
  EXPECT_EQ(producer_queue_->count(), kBufferCount - 1);

  // Post another buffer.
  producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  EXPECT_TRUE(producer_status.ok());
  producer_buffer = producer_status.take();
  ASSERT_NE(producer_buffer, nullptr);
  EXPECT_EQ(producer_buffer->PostAsync(&mi, {}), 0);

  // Verify that the consumer queue receives it.
  size_t consumer_queue_count = consumer_queue_->count();
  WaitAndHandleOnce(consumer_queue_.get(), kTimeoutMs);
  EXPECT_GT(consumer_queue_->count(), consumer_queue_count);

  // Save the current consumer queue buffer count to compare after the dequeue.
  consumer_queue_count = consumer_queue_->count();

  // Dequeue and acquire/release (discard) buffers on the consumer end.
  auto consumer_status =
      consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  EXPECT_TRUE(consumer_status.ok());
  auto consumer_buffer = consumer_status.take();
  ASSERT_NE(consumer_buffer, nullptr);
  consumer_buffer->Discard();

  // The buffer should be returned to the producer queue without being handled
  // by the silent consumer queue.
  EXPECT_LT(consumer_queue_->count(), consumer_queue_count);
  EXPECT_EQ(producer_queue_->count(), kBufferCount - 2);

  WaitAndHandleOnce(producer_queue_.get(), kTimeoutMs);
  EXPECT_EQ(producer_queue_->count(), kBufferCount - 1);
}

struct TestUserMetadata {
  char a;
  int32_t b;
  int64_t c;
};

constexpr uint64_t kUserMetadataSize =
    static_cast<uint64_t>(sizeof(TestUserMetadata));
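
// The two tests below pass user metadata through DvrNativeBufferMetadata as a
// raw (pointer, size) pair: the producer fills |user_metadata_ptr| and
// |user_metadata_size| before PostAsync(), and the consumer reads both back
// from the metadata returned by its Dequeue(). The size must match the
// queue's configured metadata type (TestUserMetadata here), as the mismatch
// test demonstrates with its -E2BIG failure.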

TEST_F(BufferHubQueueTest, TestUserMetadata) {
  ASSERT_TRUE(CreateQueues(
      config_builder_.SetMetadata<TestUserMetadata>().Build(), UsagePolicy{}));

  AllocateBuffer();

  std::vector<TestUserMetadata> user_metadata_list = {
      {'0', 0, 0}, {'1', 10, 3333}, {'@', 123, 1000000000}};

  for (auto user_metadata : user_metadata_list) {
    size_t slot;
    LocalHandle fence;
    DvrNativeBufferMetadata mi, mo;

    auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    EXPECT_TRUE(p1_status.ok());
    auto p1 = p1_status.take();
    ASSERT_NE(p1, nullptr);

    // TODO(b/69469185): Test against metadata from consumer once we implement
    // release metadata properly.
    // EXPECT_EQ(mo.user_metadata_ptr, 0U);
    // EXPECT_EQ(mo.user_metadata_size, 0U);

    mi.user_metadata_size = kUserMetadataSize;
    mi.user_metadata_ptr = reinterpret_cast<uint64_t>(&user_metadata);
    EXPECT_EQ(p1->PostAsync(&mi, {}), 0);

    auto c1_status = consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    EXPECT_TRUE(c1_status.ok()) << c1_status.GetErrorMessage();
    auto c1 = c1_status.take();
    ASSERT_NE(c1, nullptr);

    EXPECT_EQ(mo.user_metadata_size, kUserMetadataSize);
    auto out_user_metadata =
        reinterpret_cast<TestUserMetadata*>(mo.user_metadata_ptr);
    EXPECT_EQ(user_metadata.a, out_user_metadata->a);
    EXPECT_EQ(user_metadata.b, out_user_metadata->b);
    EXPECT_EQ(user_metadata.c, out_user_metadata->c);

    // On release, empty metadata is also legitimate.
    mi.user_metadata_size = 0U;
    mi.user_metadata_ptr = 0U;
    c1->ReleaseAsync(&mi, {});
  }
}

TEST_F(BufferHubQueueTest, TestUserMetadataMismatch) {
  ASSERT_TRUE(CreateQueues(
      config_builder_.SetMetadata<TestUserMetadata>().Build(), UsagePolicy{}));

  AllocateBuffer();

  TestUserMetadata user_metadata;
  size_t slot;
  LocalHandle fence;
  DvrNativeBufferMetadata mi, mo;
  auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  EXPECT_TRUE(p1_status.ok());
  auto p1 = p1_status.take();
  ASSERT_NE(p1, nullptr);

  // Posting with a mismatched user metadata size will fail, but the producer
  // buffer itself should stay untouched.
  mi.user_metadata_ptr = reinterpret_cast<uint64_t>(&user_metadata);
  mi.user_metadata_size = kUserMetadataSize + 1;
  EXPECT_EQ(p1->PostAsync(&mi, {}), -E2BIG);

  // Posting with the exact user metadata size succeeds.
  mi.user_metadata_ptr = reinterpret_cast<uint64_t>(&user_metadata);
  mi.user_metadata_size = kUserMetadataSize;
  EXPECT_EQ(p1->PostAsync(&mi, {}), 0);
}

TEST_F(BufferHubQueueTest, TestEnqueue) {
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
                           UsagePolicy{}));
  AllocateBuffer();

  size_t slot;
  LocalHandle fence;
  DvrNativeBufferMetadata mo;
  auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  ASSERT_TRUE(p1_status.ok());
  auto p1 = p1_status.take();
  ASSERT_NE(nullptr, p1);

  // Enqueue the gained buffer back into the producer queue without posting
  // it. The consumer should not be able to dequeue a buffer that was never
  // posted.
  producer_queue_->Enqueue(p1, slot, 0ULL);
  auto c1_status = consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  ASSERT_FALSE(c1_status.ok());
}

TEST_F(BufferHubQueueTest, TestAllocateBuffer) {
  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));

  size_t ps1;
  AllocateBuffer();
  LocalHandle fence;
  DvrNativeBufferMetadata mi, mo;
  auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &ps1, &mo, &fence);
  ASSERT_TRUE(p1_status.ok());
  auto p1 = p1_status.take();
  ASSERT_NE(p1, nullptr);

  // The producer queue is now exhausted.
  size_t ps2;
  auto p2_status = producer_queue_->Dequeue(kTimeoutMs, &ps2, &mo, &fence);
  ASSERT_FALSE(p2_status.ok());
  ASSERT_EQ(ETIMEDOUT, p2_status.error());

  // Dynamically add a buffer.
  AllocateBuffer();
  ASSERT_EQ(producer_queue_->count(), 1U);
  ASSERT_EQ(producer_queue_->capacity(), 2U);

  // Now we can dequeue again.
  p2_status = producer_queue_->Dequeue(kTimeoutMs, &ps2, &mo, &fence);
  ASSERT_TRUE(p2_status.ok());
  auto p2 = p2_status.take();
  ASSERT_NE(p2, nullptr);
  ASSERT_EQ(producer_queue_->count(), 0U);

  // p1 and p2 should have different slot numbers.
  ASSERT_NE(ps1, ps2);

  // The consumer queue does not import buffers until |Dequeue| or
  // |ImportBuffers| is called. So far |consumer_queue_| should be empty.
  ASSERT_EQ(consumer_queue_->count(), 0U);

  int64_t seq = 1;
  mi.index = seq;
  ASSERT_EQ(p1->PostAsync(&mi, {}), 0);

  size_t cs1, cs2;
  auto c1_status = consumer_queue_->Dequeue(kTimeoutMs, &cs1, &mo, &fence);
  ASSERT_TRUE(c1_status.ok()) << c1_status.GetErrorMessage();
  auto c1 = c1_status.take();
  ASSERT_NE(c1, nullptr);
  ASSERT_EQ(consumer_queue_->count(), 0U);
  ASSERT_EQ(consumer_queue_->capacity(), 2U);
  ASSERT_EQ(cs1, ps1);

  ASSERT_EQ(p2->PostAsync(&mi, {}), 0);
  auto c2_status = consumer_queue_->Dequeue(kTimeoutMs, &cs2, &mo, &fence);
  ASSERT_TRUE(c2_status.ok());
  auto c2 = c2_status.take();
  ASSERT_NE(c2, nullptr);
  ASSERT_EQ(cs2, ps2);
}

TEST_F(BufferHubQueueTest, TestAllocateTwoBuffers) {
  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));
  ASSERT_EQ(producer_queue_->capacity(), 0);
  auto status = producer_queue_->AllocateBuffers(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage, /*buffer_count=*/2);
  ASSERT_TRUE(status.ok());
  std::vector<size_t> buffer_slots = status.take();
  ASSERT_EQ(buffer_slots.size(), 2);
  ASSERT_EQ(producer_queue_->capacity(), 2);
}

TEST_F(BufferHubQueueTest, TestAllocateZeroBuffers) {
  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));
  ASSERT_EQ(producer_queue_->capacity(), 0);
  auto status = producer_queue_->AllocateBuffers(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage, /*buffer_count=*/0);
  ASSERT_TRUE(status.ok());
  std::vector<size_t> buffer_slots = status.take();
  ASSERT_EQ(buffer_slots.size(), 0);
  ASSERT_EQ(producer_queue_->capacity(), 0);
}

TEST_F(BufferHubQueueTest, TestUsageSetMask) {
  const uint32_t set_mask = GRALLOC_USAGE_SW_WRITE_OFTEN;
  ASSERT_TRUE(
      CreateQueues(config_builder_.Build(), UsagePolicy{set_mask, 0, 0, 0}));

  // At allocation, leave out |set_mask| from the usage bits on purpose.
  auto status = producer_queue_->AllocateBuffer(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage & ~set_mask);
  ASSERT_TRUE(status.ok());

  LocalHandle fence;
  size_t slot;
  DvrNativeBufferMetadata mo;
  auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  ASSERT_TRUE(p1_status.ok());
  auto p1 = p1_status.take();
  ASSERT_EQ(p1->usage() & set_mask, set_mask);
}

TEST_F(BufferHubQueueTest, TestUsageClearMask) {
  const uint32_t clear_mask = GRALLOC_USAGE_SW_WRITE_OFTEN;
  ASSERT_TRUE(
      CreateQueues(config_builder_.Build(), UsagePolicy{0, clear_mask, 0, 0}));

  // At allocation, add |clear_mask| into the usage bits on purpose.
  auto status = producer_queue_->AllocateBuffer(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage | clear_mask);
  ASSERT_TRUE(status.ok());

  LocalHandle fence;
  size_t slot;
  DvrNativeBufferMetadata mo;
  auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  ASSERT_TRUE(p1_status.ok());
  auto p1 = p1_status.take();
  ASSERT_EQ(p1->usage() & clear_mask, 0U);
}

TEST_F(BufferHubQueueTest, TestUsageDenySetMask) {
  const uint32_t deny_set_mask = GRALLOC_USAGE_SW_WRITE_OFTEN;
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
                           UsagePolicy{0, 0, deny_set_mask, 0}));

  // Now that setting |deny_set_mask| is illegal, allocation without those
  // bits should succeed.
  auto status = producer_queue_->AllocateBuffer(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage & ~deny_set_mask);
  ASSERT_TRUE(status.ok());

  // Allocation with those bits, however, should fail.
  status = producer_queue_->AllocateBuffer(kBufferWidth, kBufferHeight,
                                           kBufferLayerCount, kBufferFormat,
                                           kBufferUsage | deny_set_mask);
  ASSERT_FALSE(status.ok());
  ASSERT_EQ(EINVAL, status.error());
}

TEST_F(BufferHubQueueTest, TestUsageDenyClearMask) {
  const uint32_t deny_clear_mask = GRALLOC_USAGE_SW_WRITE_OFTEN;
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
                           UsagePolicy{0, 0, 0, deny_clear_mask}));

  // Now that clearing |deny_clear_mask| is illegal (i.e. setting these bits
  // is mandatory), allocation with those bits should succeed.
  auto status = producer_queue_->AllocateBuffer(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage | deny_clear_mask);
  ASSERT_TRUE(status.ok());

  // Allocation without those bits, however, should fail.
  status = producer_queue_->AllocateBuffer(kBufferWidth, kBufferHeight,
                                           kBufferLayerCount, kBufferFormat,
                                           kBufferUsage & ~deny_clear_mask);
  ASSERT_FALSE(status.ok());
  ASSERT_EQ(EINVAL, status.error());
}

TEST_F(BufferHubQueueTest, TestQueueInfo) {
  static const bool kIsAsync = true;
  ASSERT_TRUE(CreateQueues(config_builder_.SetIsAsync(kIsAsync)
                               .SetDefaultWidth(kBufferWidth)
                               .SetDefaultHeight(kBufferHeight)
                               .SetDefaultFormat(kBufferFormat)
                               .Build(),
                           UsagePolicy{}));

  EXPECT_EQ(producer_queue_->default_width(), kBufferWidth);
  EXPECT_EQ(producer_queue_->default_height(), kBufferHeight);
  EXPECT_EQ(producer_queue_->default_format(), kBufferFormat);
  EXPECT_EQ(producer_queue_->is_async(), kIsAsync);

  EXPECT_EQ(consumer_queue_->default_width(), kBufferWidth);
  EXPECT_EQ(consumer_queue_->default_height(), kBufferHeight);
  EXPECT_EQ(consumer_queue_->default_format(), kBufferFormat);
  EXPECT_EQ(consumer_queue_->is_async(), kIsAsync);
}

TEST_F(BufferHubQueueTest, TestFreeAllBuffers) {
  constexpr size_t kBufferCount = 2;

#define CHECK_NO_BUFFER_THEN_ALLOCATE(num_buffers)  \
  EXPECT_EQ(consumer_queue_->count(), 0U);          \
  EXPECT_EQ(consumer_queue_->capacity(), 0U);       \
  EXPECT_EQ(producer_queue_->count(), 0U);          \
  EXPECT_EQ(producer_queue_->capacity(), 0U);       \
  for (size_t i = 0; i < num_buffers; i++) {        \
    AllocateBuffer();                               \
  }                                                 \
  EXPECT_EQ(producer_queue_->count(), num_buffers); \
  EXPECT_EQ(producer_queue_->capacity(), num_buffers);

  size_t slot;
  LocalHandle fence;
  pdx::Status<void> status;
  pdx::Status<std::shared_ptr<ConsumerBuffer>> consumer_status;
  pdx::Status<std::shared_ptr<ProducerBuffer>> producer_status;
  std::shared_ptr<ConsumerBuffer> consumer_buffer;
  std::shared_ptr<ProducerBuffer> producer_buffer;
  DvrNativeBufferMetadata mi, mo;

  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));

  // Free all buffers when buffers are available for dequeue.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when one buffer is dequeued.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  ASSERT_TRUE(producer_status.ok());
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when all buffers are dequeued.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  for (size_t i = 0; i < kBufferCount; i++) {
    producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    ASSERT_TRUE(producer_status.ok());
  }
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when one buffer is posted.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  ASSERT_TRUE(producer_status.ok());
  producer_buffer = producer_status.take();
  ASSERT_NE(nullptr, producer_buffer);
  ASSERT_EQ(0, producer_buffer->PostAsync(&mi, fence));
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when all buffers are posted.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  for (size_t i = 0; i < kBufferCount; i++) {
    producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    ASSERT_TRUE(producer_status.ok());
    producer_buffer = producer_status.take();
    ASSERT_NE(producer_buffer, nullptr);
    ASSERT_EQ(producer_buffer->PostAsync(&mi, fence), 0);
  }
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when all buffers are acquired.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  for (size_t i = 0; i < kBufferCount; i++) {
    producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    ASSERT_TRUE(producer_status.ok());
    producer_buffer = producer_status.take();
    ASSERT_NE(producer_buffer, nullptr);
    ASSERT_EQ(producer_buffer->PostAsync(&mi, fence), 0);
    consumer_status = consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    ASSERT_TRUE(consumer_status.ok()) << consumer_status.GetErrorMessage();
  }
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // In addition to FreeAllBuffers() from the queue, it is also required to
  // delete all references to the ProducerBuffer (i.e. the PDX client).
  producer_buffer = nullptr;

  // Crank consumer queue events to pick up EPOLLHUP events on the queue.
  consumer_queue_->HandleQueueEvents();

  // One last check.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);

#undef CHECK_NO_BUFFER_THEN_ALLOCATE
}
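
// The remaining tests cover exporting queues across Binder: a queue is moved
// into a {Producer,Consumer}QueueParcelable, written to a Parcel, and
// recreated on the receiving end via {Producer,Consumer}Queue::Import(). Note
// that these tests read back from the same Parcel (after resetting its data
// position) instead of performing a real Binder IPC round trip.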

TEST_F(BufferHubQueueTest, TestProducerToParcelableNotEmpty) {
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<uint64_t>().Build(),
                           UsagePolicy{}));

  // Allocate only one buffer.
  AllocateBuffer();

  // Export should fail as the queue is not empty.
  auto status = producer_queue_->TakeAsParcelable();
  EXPECT_FALSE(status.ok());
}

TEST_F(BufferHubQueueTest, TestProducerExportToParcelable) {
  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));

  auto s1 = producer_queue_->TakeAsParcelable();
  EXPECT_TRUE(s1.ok());

  ProducerQueueParcelable output_parcelable = s1.take();
  EXPECT_TRUE(output_parcelable.IsValid());

  Parcel parcel;
  status_t res;
  res = output_parcelable.writeToParcel(&parcel);
  EXPECT_EQ(res, OK);

  // After being written into the parcel, the output_parcelable is still valid
  // and keeps the producer channel alive.
  EXPECT_TRUE(output_parcelable.IsValid());

  // Creating a producer buffer should fail.
  auto s2 = producer_queue_->AllocateBuffer(kBufferWidth, kBufferHeight,
                                            kBufferLayerCount, kBufferFormat,
                                            kBufferUsage);
  ASSERT_FALSE(s2.ok());

  // Reset the data position so that we can read back from the same parcel
  // without actually doing Binder IPC.
  parcel.setDataPosition(0);
  producer_queue_ = nullptr;

  // Recreate the producer queue from the parcel.
  ProducerQueueParcelable input_parcelable;
  EXPECT_FALSE(input_parcelable.IsValid());

  res = input_parcelable.readFromParcel(&parcel);
  EXPECT_EQ(res, OK);
  EXPECT_TRUE(input_parcelable.IsValid());

  EXPECT_EQ(producer_queue_, nullptr);
  producer_queue_ = ProducerQueue::Import(input_parcelable.TakeChannelHandle());
  EXPECT_FALSE(input_parcelable.IsValid());
  ASSERT_NE(producer_queue_, nullptr);

  // The queue newly created from the parcel can allocate buffers and post
  // them to the consumer.
  EXPECT_NO_FATAL_FAILURE(AllocateBuffer());
  EXPECT_EQ(producer_queue_->count(), 1U);
  EXPECT_EQ(producer_queue_->capacity(), 1U);

  size_t slot;
  DvrNativeBufferMetadata producer_meta;
  DvrNativeBufferMetadata consumer_meta;
  LocalHandle fence;
  auto s3 = producer_queue_->Dequeue(0, &slot, &producer_meta, &fence);
  EXPECT_TRUE(s3.ok());

  std::shared_ptr<ProducerBuffer> p1 = s3.take();
  ASSERT_NE(p1, nullptr);

  producer_meta.timestamp = 42;
  EXPECT_EQ(p1->PostAsync(&producer_meta, LocalHandle()), 0);

  // Make sure the buffer can be dequeued from the consumer side.
  auto s4 = consumer_queue_->Dequeue(kTimeoutMs, &slot, &consumer_meta, &fence);
  EXPECT_TRUE(s4.ok()) << s4.GetErrorMessage();
  EXPECT_EQ(consumer_queue_->capacity(), 1U);

  auto consumer = s4.take();
  ASSERT_NE(consumer, nullptr);
  EXPECT_EQ(producer_meta.timestamp, consumer_meta.timestamp);
}

TEST_F(BufferHubQueueTest, TestCreateConsumerParcelable) {
  ASSERT_TRUE(CreateProducerQueue(config_builder_.Build(), UsagePolicy{}));

  auto s1 = producer_queue_->CreateConsumerQueueParcelable();
  EXPECT_TRUE(s1.ok());
  ConsumerQueueParcelable output_parcelable = s1.take();
  EXPECT_TRUE(output_parcelable.IsValid());

  // Write to a new Parcel object.
  Parcel parcel;
  status_t res;
  res = output_parcelable.writeToParcel(&parcel);

  // Reset the data position so that we can read back from the same parcel
  // without actually doing Binder IPC.
  parcel.setDataPosition(0);

  // No consumer queue created yet.
  EXPECT_EQ(consumer_queue_, nullptr);

  // If the parcel contains a consumer queue, reading it into a
  // ProducerQueueParcelable should fail.
  ProducerQueueParcelable wrongly_typed_parcelable;
  EXPECT_FALSE(wrongly_typed_parcelable.IsValid());
  res = wrongly_typed_parcelable.readFromParcel(&parcel);
  EXPECT_EQ(res, -EINVAL);
  parcel.setDataPosition(0);

  // Create the consumer queue from the parcel.
  ConsumerQueueParcelable input_parcelable;
  EXPECT_FALSE(input_parcelable.IsValid());

  res = input_parcelable.readFromParcel(&parcel);
  EXPECT_EQ(res, OK);
  EXPECT_TRUE(input_parcelable.IsValid());

  consumer_queue_ = ConsumerQueue::Import(input_parcelable.TakeChannelHandle());
  EXPECT_FALSE(input_parcelable.IsValid());
  ASSERT_NE(consumer_queue_, nullptr);

  EXPECT_NO_FATAL_FAILURE(AllocateBuffer());
  EXPECT_EQ(producer_queue_->count(), 1U);
  EXPECT_EQ(producer_queue_->capacity(), 1U);

  size_t slot;
  DvrNativeBufferMetadata producer_meta;
  DvrNativeBufferMetadata consumer_meta;
  LocalHandle fence;
  auto s2 = producer_queue_->Dequeue(0, &slot, &producer_meta, &fence);
  EXPECT_TRUE(s2.ok());

  std::shared_ptr<ProducerBuffer> p1 = s2.take();
  ASSERT_NE(p1, nullptr);

  producer_meta.timestamp = 42;
  EXPECT_EQ(p1->PostAsync(&producer_meta, LocalHandle()), 0);

  // Make sure the buffer can be dequeued from the consumer side.
  auto s3 = consumer_queue_->Dequeue(kTimeoutMs, &slot, &consumer_meta, &fence);
  EXPECT_TRUE(s3.ok()) << s3.GetErrorMessage();
  EXPECT_EQ(consumer_queue_->capacity(), 1U);

  auto consumer = s3.take();
  ASSERT_NE(consumer, nullptr);
  EXPECT_EQ(producer_meta.timestamp, consumer_meta.timestamp);
}

}  // namespace

}  // namespace dvr
}  // namespace android