mq_test.cpp
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <asm-generic/mman.h>
#include <gtest/gtest.h>
#include <time.h>
#include <atomic>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <sstream>
#include <thread>
#include <fmq/MessageQueue.h>
#include <fmq/EventFlag.h>

enum EventFlagBits : uint32_t {
    kFmqNotEmpty = 1 << 0,
    kFmqNotFull = 1 << 1,
};
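
/*
 * In the blocking tests below, the EventFlag word is used as a simple
 * producer/consumer handshake: a writer wakes kFmqNotEmpty after writing,
 * and a blocked reader wakes kFmqNotFull after a successful read.
 */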

typedef android::hardware::MessageQueue<uint8_t, android::hardware::kSynchronizedReadWrite>
        MessageQueueSync;
typedef android::hardware::MessageQueue<uint8_t, android::hardware::kUnsynchronizedWrite>
        MessageQueueUnsync;
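
/*
 * Two queue flavors are exercised below: with kSynchronizedReadWrite, a
 * write fails once the queue is full; with kUnsynchronizedWrite, writes
 * always succeed, but a reader that has been lapped by the writer sees its
 * next read fail.
 */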

class SynchronizedReadWrites : public ::testing::Test {
protected:
    virtual void TearDown() {
        delete mQueue;
    }

    virtual void SetUp() {
        static constexpr size_t kNumElementsInQueue = 2048;
        mQueue = new (std::nothrow) MessageQueueSync(kNumElementsInQueue);
        ASSERT_NE(nullptr, mQueue);
        ASSERT_TRUE(mQueue->isValid());
        mNumMessagesMax = mQueue->getQuantumCount();
        ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
    }

    MessageQueueSync* mQueue = nullptr;
    size_t mNumMessagesMax = 0;
};

class UnsynchronizedWrite : public ::testing::Test {
protected:
    virtual void TearDown() {
        delete mQueue;
    }

    virtual void SetUp() {
        static constexpr size_t kNumElementsInQueue = 2048;
        mQueue = new (std::nothrow) MessageQueueUnsync(kNumElementsInQueue);
        ASSERT_NE(nullptr, mQueue);
        ASSERT_TRUE(mQueue->isValid());
        mNumMessagesMax = mQueue->getQuantumCount();
        ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
    }

    MessageQueueUnsync* mQueue = nullptr;
    size_t mNumMessagesMax = 0;
};

class BlockingReadWrites : public ::testing::Test {
protected:
    virtual void TearDown() {
        delete mQueue;
    }

    virtual void SetUp() {
        static constexpr size_t kNumElementsInQueue = 2048;
        mQueue = new (std::nothrow) MessageQueueSync(kNumElementsInQueue);
        ASSERT_NE(nullptr, mQueue);
        ASSERT_TRUE(mQueue->isValid());
        mNumMessagesMax = mQueue->getQuantumCount();
        ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
        /*
         * Initialize the EventFlag word to indicate the queue is not full.
         */
        std::atomic_init(&mFw, static_cast<uint32_t>(kFmqNotFull));
    }

    MessageQueueSync* mQueue = nullptr;
    std::atomic<uint32_t> mFw;
    size_t mNumMessagesMax = 0;
};

class QueueSizeOdd : public ::testing::Test {
protected:
    virtual void TearDown() {
        delete mQueue;
    }

    virtual void SetUp() {
        static constexpr size_t kNumElementsInQueue = 2049;
        mQueue = new (std::nothrow) MessageQueueSync(kNumElementsInQueue,
                                                     true /* configureEventFlagWord */);
        ASSERT_NE(nullptr, mQueue);
        ASSERT_TRUE(mQueue->isValid());
        mNumMessagesMax = mQueue->getQuantumCount();
        ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
        auto evFlagWordPtr = mQueue->getEventFlagWord();
        ASSERT_NE(nullptr, evFlagWordPtr);
        /*
         * Initialize the EventFlag word to indicate the queue is not full.
         */
        std::atomic_init(evFlagWordPtr, static_cast<uint32_t>(kFmqNotFull));
    }

    MessageQueueSync* mQueue = nullptr;
    size_t mNumMessagesMax = 0;
};

class BadQueueConfig : public ::testing::Test {};

/*
 * Utility function to initialize data to be written to the FMQ.
 */
inline void initData(uint8_t* data, size_t count) {
    for (size_t i = 0; i < count; i++) {
        data[i] = i & 0xFF;
    }
}

/*
 * This thread attempts a read and blocks. When wait() returns, it checks
 * whether the kFmqNotEmpty bit is actually set. If the read is successful,
 * it wakes kFmqNotFull.
 */
void ReaderThreadBlocking(
        android::hardware::MessageQueue<uint8_t,
        android::hardware::kSynchronizedReadWrite>* fmq,
        std::atomic<uint32_t>* fwAddr) {
    const size_t dataLen = 64;
    uint8_t data[dataLen];
    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(fwAddr, &efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    while (true) {
        uint32_t efState = 0;
        android::status_t ret = efGroup->wait(kFmqNotEmpty,
                                              &efState,
                                              5000000000 /* timeoutNanoSeconds */);
        /*
         * The wait should not time out here after 5s.
         */
        ASSERT_NE(android::TIMED_OUT, ret);
        if ((efState & kFmqNotEmpty) && fmq->read(data, dataLen)) {
            efGroup->wake(kFmqNotFull);
            break;
        }
    }

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}

/*
 * This thread attempts a blocking read using the readBlocking() API,
 * passing in a pointer to an EventFlag object.
 */
void ReaderThreadBlocking2(
        android::hardware::MessageQueue<uint8_t,
        android::hardware::kSynchronizedReadWrite>* fmq,
        std::atomic<uint32_t>* fwAddr) {
    const size_t dataLen = 64;
    uint8_t data[dataLen];
    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(fwAddr, &efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);
    bool ret = fmq->readBlocking(data,
                                 dataLen,
                                 static_cast<uint32_t>(kFmqNotFull),
                                 static_cast<uint32_t>(kFmqNotEmpty),
                                 5000000000 /* timeOutNanos */,
                                 efGroup);
    ASSERT_TRUE(ret);
    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}

TEST_F(BadQueueConfig, QueueSizeTooLarge) {
    typedef android::hardware::MessageQueue<uint16_t, android::hardware::kSynchronizedReadWrite>
            MessageQueueSync16;
    size_t numElementsInQueue = SIZE_MAX / sizeof(uint16_t) + 1;
    MessageQueueSync16* fmq = new (std::nothrow) MessageQueueSync16(numElementsInQueue);
    ASSERT_NE(nullptr, fmq);
    /*
     * Creation should fail: the requested size in bytes does not fit
     * into size_t.
     */
    ASSERT_FALSE(fmq->isValid());
    delete fmq;
}

/*
 * Test that basic blocking works. This test uses the non-blocking
 * read()/write() APIs together with an explicit EventFlag.
 */
TEST_F(BlockingReadWrites, SmallInputTest1) {
    const size_t dataLen = 64;
    uint8_t data[dataLen] = {0};
    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(&mFw, &efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    /*
     * Start a thread that will try to read and block on kFmqNotEmpty.
     */
    std::thread Reader(ReaderThreadBlocking, mQueue, &mFw);
    struct timespec waitTime = {0, 100 * 1000000 /* 100 ms */};
    ASSERT_EQ(0, nanosleep(&waitTime, NULL));

    /*
     * After waiting for some time, write into the FMQ
     * and call wake() on kFmqNotEmpty.
     */
    ASSERT_TRUE(mQueue->write(data, dataLen));
    status = efGroup->wake(kFmqNotEmpty);
    ASSERT_EQ(android::NO_ERROR, status);

    ASSERT_EQ(0, nanosleep(&waitTime, NULL));
    Reader.join();

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}

/*
 * Test that basic blocking works. This test uses the
 * writeBlocking()/readBlocking() APIs.
 */
TEST_F(BlockingReadWrites, SmallInputTest2) {
    const size_t dataLen = 64;
    uint8_t data[dataLen] = {0};
    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(&mFw, &efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    /*
     * Start a thread that will try to read and block on kFmqNotEmpty. It will
     * call wake() on kFmqNotFull when the read is successful.
     */
    std::thread Reader(ReaderThreadBlocking2, mQueue, &mFw);
    bool ret = mQueue->writeBlocking(data,
                                     dataLen,
                                     static_cast<uint32_t>(kFmqNotFull),
                                     static_cast<uint32_t>(kFmqNotEmpty),
                                     5000000000 /* timeOutNanos */,
                                     efGroup);
    ASSERT_TRUE(ret);
    Reader.join();

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}

/*
 * Test that basic blocking times out as intended.
 */
TEST_F(BlockingReadWrites, BlockingTimeOutTest) {
    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(&mFw, &efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    /* Block on an EventFlag bit that no one will wake; time out after 1s. */
    uint32_t efState = 0;
    android::status_t ret = efGroup->wait(kFmqNotEmpty,
                                          &efState,
                                          1000000000 /* timeoutNanoSeconds */);
    /*
     * The wait should time out after a second.
     */
    EXPECT_EQ(android::TIMED_OUT, ret);

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}

/*
 * Test that an odd queue size does not cause an unaligned access error
 * when the EventFlag object is accessed.
 */
TEST_F(QueueSizeOdd, EventFlagTest) {
    const size_t dataLen = 64;
    uint8_t data[dataLen] = {0};
    bool ret = mQueue->writeBlocking(data,
                                     dataLen,
                                     static_cast<uint32_t>(kFmqNotFull),
                                     static_cast<uint32_t>(kFmqNotEmpty),
                                     5000000000 /* timeOutNanos */);
    ASSERT_TRUE(ret);
}

/*
 * Verify that a few bytes of data can be successfully written and read.
 */
TEST_F(SynchronizedReadWrites, SmallInputTest1) {
    const size_t dataLen = 16;
    ASSERT_LE(dataLen, mNumMessagesMax);
    uint8_t data[dataLen];
    initData(data, dataLen);
    ASSERT_TRUE(mQueue->write(data, dataLen));
    uint8_t readData[dataLen] = {};
    ASSERT_TRUE(mQueue->read(readData, dataLen));
    ASSERT_EQ(0, memcmp(data, readData, dataLen));
}

/*
 * Verify that a few bytes of data can be successfully written and read using
 * beginWrite()/commitWrite() and beginRead()/commitRead().
 */
TEST_F(SynchronizedReadWrites, SmallInputTest2) {
    const size_t dataLen = 16;
    ASSERT_LE(dataLen, mNumMessagesMax);
    uint8_t data[dataLen];
    initData(data, dataLen);

    MessageQueueSync::MemTransaction tx;
    ASSERT_TRUE(mQueue->beginWrite(dataLen, &tx));
    ASSERT_TRUE(tx.copyTo(data, 0 /* startIdx */, dataLen));
    ASSERT_TRUE(mQueue->commitWrite(dataLen));

    uint8_t readData[dataLen] = {};
    ASSERT_TRUE(mQueue->beginRead(dataLen, &tx));
    ASSERT_TRUE(tx.copyFrom(readData, 0 /* startIdx */, dataLen));
    ASSERT_TRUE(mQueue->commitRead(dataLen));

    ASSERT_EQ(0, memcmp(data, readData, dataLen));
}

/*
 * Verify that a few bytes of data can be successfully written and read using
 * beginWrite()/commitWrite() and beginRead()/commitRead(), as well as
 * getSlot().
 */
TEST_F(SynchronizedReadWrites, SmallInputTest3) {
    const size_t dataLen = 16;
    ASSERT_LE(dataLen, mNumMessagesMax);
    uint8_t data[dataLen];
    initData(data, dataLen);

    MessageQueueSync::MemTransaction tx;
    ASSERT_TRUE(mQueue->beginWrite(dataLen, &tx));

    auto first = tx.getFirstRegion();
    auto second = tx.getSecondRegion();
    ASSERT_EQ(first.getLength() + second.getLength(), dataLen);

    for (size_t i = 0; i < dataLen; i++) {
        uint8_t* ptr = tx.getSlot(i);
        *ptr = data[i];
    }
    ASSERT_TRUE(mQueue->commitWrite(dataLen));

    uint8_t readData[dataLen] = {};
    ASSERT_TRUE(mQueue->beginRead(dataLen, &tx));

    first = tx.getFirstRegion();
    second = tx.getSecondRegion();
    ASSERT_EQ(first.getLength() + second.getLength(), dataLen);

    for (size_t i = 0; i < dataLen; i++) {
        uint8_t* ptr = tx.getSlot(i);
        readData[i] = *ptr;
    }
    ASSERT_TRUE(mQueue->commitRead(dataLen));

    ASSERT_EQ(0, memcmp(data, readData, dataLen));
}

/*
 * Verify that read() returns false when trying to read from an empty queue.
 */
TEST_F(SynchronizedReadWrites, ReadWhenEmpty1) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    const size_t dataLen = 2;
    ASSERT_LE(dataLen, mNumMessagesMax);
    uint8_t readData[dataLen];
    ASSERT_FALSE(mQueue->read(readData, dataLen));
}

/*
 * Verify that beginRead() returns a MemTransaction object with null pointers
 * when trying to read from an empty queue.
 */
TEST_F(SynchronizedReadWrites, ReadWhenEmpty2) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    const size_t dataLen = 2;
    ASSERT_LE(dataLen, mNumMessagesMax);

    MessageQueueSync::MemTransaction tx;
    ASSERT_FALSE(mQueue->beginRead(dataLen, &tx));

    auto first = tx.getFirstRegion();
    auto second = tx.getSecondRegion();
    ASSERT_EQ(nullptr, first.getAddress());
    ASSERT_EQ(nullptr, second.getAddress());
}

/*
 * Fill the queue. Verify that another write fails and that
 * availableToWrite() returns 0, as expected.
 */
TEST_F(SynchronizedReadWrites, WriteWhenFull1) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    std::vector<uint8_t> data(mNumMessagesMax);
    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
    ASSERT_EQ(0UL, mQueue->availableToWrite());
    ASSERT_FALSE(mQueue->write(&data[0], 1));

    std::vector<uint8_t> readData(mNumMessagesMax);
    ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
    ASSERT_EQ(data, readData);
}

/*
 * Fill the queue. Verify that beginWrite() returns a MemTransaction
 * object with null base pointers.
 */
TEST_F(SynchronizedReadWrites, WriteWhenFull2) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    std::vector<uint8_t> data(mNumMessagesMax);
    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
    ASSERT_EQ(0UL, mQueue->availableToWrite());

    MessageQueueSync::MemTransaction tx;
    ASSERT_FALSE(mQueue->beginWrite(1, &tx));

    auto first = tx.getFirstRegion();
    auto second = tx.getSecondRegion();
    ASSERT_EQ(nullptr, first.getAddress());
    ASSERT_EQ(nullptr, second.getAddress());
}

/*
 * Write a chunk of data equal to the queue size.
 * Verify that the write is successful and that a subsequent read
 * returns the expected data.
 */
TEST_F(SynchronizedReadWrites, LargeInputTest1) {
    std::vector<uint8_t> data(mNumMessagesMax);
    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
    std::vector<uint8_t> readData(mNumMessagesMax);
    ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
    ASSERT_EQ(data, readData);
}

/*
 * Attempt to write a chunk of data larger than the queue size.
 * Verify that it fails, that a subsequent read fails, and that
 * the queue is still empty.
 */
TEST_F(SynchronizedReadWrites, LargeInputTest2) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    const size_t dataLen = 4096;
    ASSERT_GT(dataLen, mNumMessagesMax);
    std::vector<uint8_t> data(dataLen);
    initData(&data[0], dataLen);
    ASSERT_FALSE(mQueue->write(&data[0], dataLen));
    std::vector<uint8_t> readData(mNumMessagesMax);
    ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
    ASSERT_NE(data, readData);
    ASSERT_EQ(0UL, mQueue->availableToRead());
}

/*
 * After the queue is full, try to write more data. Verify that
 * the attempt returns false and did not affect the pre-existing
 * data in the queue.
 */
TEST_F(SynchronizedReadWrites, LargeInputTest3) {
    std::vector<uint8_t> data(mNumMessagesMax);
    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
    ASSERT_FALSE(mQueue->write(&data[0], 1));
    std::vector<uint8_t> readData(mNumMessagesMax);
    ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
    ASSERT_EQ(data, readData);
}

/*
 * Verify that beginWrite() returns a MemTransaction object with
 * null base pointers when attempting to write more data than
 * the queue can hold.
 */
TEST_F(SynchronizedReadWrites, LargeInputTest4) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    const size_t dataLen = 4096;
    ASSERT_GT(dataLen, mNumMessagesMax);

    MessageQueueSync::MemTransaction tx;
    ASSERT_FALSE(mQueue->beginWrite(dataLen, &tx));

    auto first = tx.getFirstRegion();
    auto second = tx.getSecondRegion();
    ASSERT_EQ(nullptr, first.getAddress());
    ASSERT_EQ(nullptr, second.getAddress());
}

/*
 * Verify that multiple reads one after the other return the expected data.
 */
TEST_F(SynchronizedReadWrites, MultipleRead) {
    const size_t chunkSize = 100;
    const size_t chunkNum = 5;
    const size_t dataLen = chunkSize * chunkNum;
    ASSERT_LE(dataLen, mNumMessagesMax);
    uint8_t data[dataLen];
    initData(data, dataLen);
    ASSERT_TRUE(mQueue->write(data, dataLen));
    uint8_t readData[dataLen] = {};
    for (size_t i = 0; i < chunkNum; i++) {
        ASSERT_TRUE(mQueue->read(readData + i * chunkSize, chunkSize));
    }
    ASSERT_EQ(0, memcmp(readData, data, dataLen));
}

/*
 * Verify that multiple writes one after the other are read back correctly.
 */
TEST_F(SynchronizedReadWrites, MultipleWrite) {
    const size_t chunkSize = 100;
    const size_t chunkNum = 5;
    const size_t dataLen = chunkSize * chunkNum;
    ASSERT_LE(dataLen, mNumMessagesMax);
    uint8_t data[dataLen];
    initData(data, dataLen);
    for (size_t i = 0; i < chunkNum; i++) {
        ASSERT_TRUE(mQueue->write(data + i * chunkSize, chunkSize));
    }
    uint8_t readData[dataLen] = {};
    ASSERT_TRUE(mQueue->read(readData, dataLen));
    ASSERT_EQ(0, memcmp(readData, data, dataLen));
}

/*
 * Write mNumMessagesMax - 1 messages into the FMQ and read them back,
 * leaving the read/write counters near the end of the ring buffer.
 * Then write mNumMessagesMax messages into the queue, which causes a
 * wrap around. Read and verify the data.
 */
TEST_F(SynchronizedReadWrites, ReadWriteWrapAround1) {
    size_t numMessages = mNumMessagesMax - 1;
    std::vector<uint8_t> data(mNumMessagesMax);
    std::vector<uint8_t> readData(mNumMessagesMax);
    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], numMessages));
    ASSERT_TRUE(mQueue->read(&readData[0], numMessages));
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
    ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
    ASSERT_EQ(data, readData);
}

/*
 * Use beginWrite()/commitWrite() and beginRead()/commitRead() to verify
 * that wraparounds are handled correctly.
 * Write mNumMessagesMax - 1 messages into the FMQ and read them back,
 * leaving the read/write counters near the end of the ring buffer.
 * Then write mNumMessagesMax messages into the queue, which causes a
 * wrap around. Read and verify the data.
 */
TEST_F(SynchronizedReadWrites, ReadWriteWrapAround2) {
    size_t dataLen = mNumMessagesMax - 1;
    std::vector<uint8_t> data(mNumMessagesMax);
    std::vector<uint8_t> readData(mNumMessagesMax);
    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], dataLen));
    ASSERT_TRUE(mQueue->read(&readData[0], dataLen));

    /*
     * The next write and read will have to deal with wraparounds.
     */
    MessageQueueSync::MemTransaction tx;
    ASSERT_TRUE(mQueue->beginWrite(mNumMessagesMax, &tx));

    auto first = tx.getFirstRegion();
    auto second = tx.getSecondRegion();
    ASSERT_EQ(first.getLength() + second.getLength(), mNumMessagesMax);

    ASSERT_TRUE(tx.copyTo(&data[0], 0 /* startIdx */, mNumMessagesMax));
    ASSERT_TRUE(mQueue->commitWrite(mNumMessagesMax));

    ASSERT_TRUE(mQueue->beginRead(mNumMessagesMax, &tx));

    first = tx.getFirstRegion();
    second = tx.getSecondRegion();
    ASSERT_EQ(first.getLength() + second.getLength(), mNumMessagesMax);

    ASSERT_TRUE(tx.copyFrom(&readData[0], 0 /* startIdx */, mNumMessagesMax));
    ASSERT_TRUE(mQueue->commitRead(mNumMessagesMax));

    ASSERT_EQ(data, readData);
}

/*
 * Verify that a few bytes of data can be successfully written and read.
 */
TEST_F(UnsynchronizedWrite, SmallInputTest1) {
    const size_t dataLen = 16;
    ASSERT_LE(dataLen, mNumMessagesMax);
    uint8_t data[dataLen];
    initData(data, dataLen);
    ASSERT_TRUE(mQueue->write(data, dataLen));
    uint8_t readData[dataLen] = {};
    ASSERT_TRUE(mQueue->read(readData, dataLen));
    ASSERT_EQ(0, memcmp(data, readData, dataLen));
}

/*
 * Verify that read() returns false when trying to read from an empty queue.
 */
TEST_F(UnsynchronizedWrite, ReadWhenEmpty) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    const size_t dataLen = 2;
    ASSERT_LT(dataLen, mNumMessagesMax);
    uint8_t readData[dataLen];
    ASSERT_FALSE(mQueue->read(readData, dataLen));
}

/*
 * Fill the queue and verify that availableToWrite() returns 0. Verify that
 * a subsequent write still succeeds, since unsynchronized writes never fail,
 * and that the lapped reader's next read fails.
 */
TEST_F(UnsynchronizedWrite, WriteWhenFull1) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    std::vector<uint8_t> data(mNumMessagesMax);
    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
    ASSERT_EQ(0UL, mQueue->availableToWrite());
    ASSERT_TRUE(mQueue->write(&data[0], 1));

    std::vector<uint8_t> readData(mNumMessagesMax);
    ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
}

/*
 * Fill the queue, then verify that a subsequent write using
 * beginWrite()/commitWrite() is successful. Verify that the next read
 * fails, as expected for the unsynchronized flavor.
 */
TEST_F(UnsynchronizedWrite, WriteWhenFull2) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    std::vector<uint8_t> data(mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));

    MessageQueueUnsync::MemTransaction tx;
    ASSERT_TRUE(mQueue->beginWrite(1, &tx));
    ASSERT_EQ(tx.getFirstRegion().getLength(), 1U);
    ASSERT_TRUE(tx.copyTo(&data[0], 0 /* startIdx */));
    ASSERT_TRUE(mQueue->commitWrite(1));

    std::vector<uint8_t> readData(mNumMessagesMax);
    ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
}

/*
 * Write a chunk of data equal to the queue size.
 * Verify that the write is successful and that a subsequent read
 * returns the expected data.
 */
TEST_F(UnsynchronizedWrite, LargeInputTest1) {
    std::vector<uint8_t> data(mNumMessagesMax);
    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
    std::vector<uint8_t> readData(mNumMessagesMax);
    ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
    ASSERT_EQ(data, readData);
}

/*
 * Attempt to write a chunk of data larger than the queue size.
 * Verify that it fails, that a subsequent read fails, and that
 * the queue is still empty.
 */
TEST_F(UnsynchronizedWrite, LargeInputTest2) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    const size_t dataLen = 4096;
    ASSERT_GT(dataLen, mNumMessagesMax);
    std::vector<uint8_t> data(dataLen);
    initData(&data[0], dataLen);
    ASSERT_FALSE(mQueue->write(&data[0], dataLen));
    std::vector<uint8_t> readData(mNumMessagesMax);
    ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
    ASSERT_NE(data, readData);
    ASSERT_EQ(0UL, mQueue->availableToRead());
}

/*
 * After the queue is full, try to write more data. Verify that
 * the attempt is successful, since unsynchronized writes never fail,
 * and that the lapped reader's read fails as expected.
 */
TEST_F(UnsynchronizedWrite, LargeInputTest3) {
    std::vector<uint8_t> data(mNumMessagesMax);
    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
    ASSERT_TRUE(mQueue->write(&data[0], 1));
    std::vector<uint8_t> readData(mNumMessagesMax);
    ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
}

/*
 * Verify that multiple reads one after the other return the expected data.
 */
TEST_F(UnsynchronizedWrite, MultipleRead) {
    const size_t chunkSize = 100;
    const size_t chunkNum = 5;
    const size_t dataLen = chunkSize * chunkNum;
    ASSERT_LE(dataLen, mNumMessagesMax);
    uint8_t data[dataLen];
    initData(data, dataLen);
    ASSERT_TRUE(mQueue->write(data, dataLen));
    uint8_t readData[dataLen] = {};
    for (size_t i = 0; i < chunkNum; i++) {
        ASSERT_TRUE(mQueue->read(readData + i * chunkSize, chunkSize));
    }
    ASSERT_EQ(0, memcmp(readData, data, dataLen));
}

/*
 * Verify that multiple writes one after the other are read back correctly.
 */
TEST_F(UnsynchronizedWrite, MultipleWrite) {
    const size_t chunkSize = 100;
    const size_t chunkNum = 5;
    const size_t dataLen = chunkSize * chunkNum;
    ASSERT_LE(dataLen, mNumMessagesMax);
    uint8_t data[dataLen];
    initData(data, dataLen);
    for (size_t i = 0; i < chunkNum; i++) {
        ASSERT_TRUE(mQueue->write(data + i * chunkSize, chunkSize));
    }
    uint8_t readData[dataLen] = {};
    ASSERT_TRUE(mQueue->read(readData, dataLen));
    ASSERT_EQ(0, memcmp(readData, data, dataLen));
}

/*
 * Write mNumMessagesMax - 1 messages into the FMQ and read them back,
 * leaving the read/write counters near the end of the ring buffer.
 * Then write mNumMessagesMax messages into the queue, which causes a
 * wrap around. Read and verify the data.
 */
TEST_F(UnsynchronizedWrite, ReadWriteWrapAround) {
    size_t numMessages = mNumMessagesMax - 1;
    std::vector<uint8_t> data(mNumMessagesMax);
    std::vector<uint8_t> readData(mNumMessagesMax);
    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], numMessages));
    ASSERT_TRUE(mQueue->read(&readData[0], numMessages));
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
    ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
    ASSERT_EQ(data, readData);
}
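
/*
 * Test entry point. Builds that already link against a gtest main library
 * do not need this definition.
 */
int main(int argc, char** argv) {
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}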