// MtpFfsHandle.cpp
  1. /*
  2. * Copyright (C) 2016 The Android Open Source Project
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <android-base/logging.h>
  17. #include <android-base/properties.h>
  18. #include <asyncio/AsyncIO.h>
  19. #include <dirent.h>
  20. #include <errno.h>
  21. #include <fcntl.h>
  22. #include <memory>
  23. #include <stdio.h>
  24. #include <stdlib.h>
  25. #include <string.h>
  26. #include <sys/eventfd.h>
  27. #include <sys/ioctl.h>
  28. #include <sys/mman.h>
  29. #include <sys/poll.h>
  30. #include <sys/stat.h>
  31. #include <sys/types.h>
  32. #include <unistd.h>
  33. #include "PosixAsyncIO.h"
  34. #include "MtpDescriptors.h"
  35. #include "MtpFfsHandle.h"
  36. #include "mtp.h"
namespace {

constexpr unsigned AIO_BUFS_MAX = 128;   // max iocbs submitted in one io_submit batch
constexpr unsigned AIO_BUF_LEN = 16384;  // size of each AIO slice in bytes

constexpr unsigned FFS_NUM_EVENTS = 5;   // max FunctionFS events drained per read of ep0

// Largest contiguous chunk moved per submit/wait round: 128 * 16 KiB = 2 MiB.
constexpr unsigned MAX_FILE_CHUNK_SIZE = AIO_BUFS_MAX * AIO_BUF_LEN;

// MTP encodes file sizes in 32 bits; a >=4 GiB file is signalled as 0xFFFFFFFF.
constexpr uint32_t MAX_MTP_FILE_SIZE = 0xFFFFFFFF;

// Note: POLL_TIMEOUT_MS = 0 means return immediately i.e. no sleep.
// And this will cause high CPU usage.
constexpr int32_t POLL_TIMEOUT_MS = 500;

// Zero timeout handed to io_getevents so it never blocks; must be non-const
// storage because io_getevents takes a mutable timespec*.
struct timespec ZERO_TIMEOUT = { 0, 0 };

// Wire layout of the MTP_REQ_GET_DEVICE_STATUS control reply header.
struct mtp_device_status {
    uint16_t  wLength;
    uint16_t  wCode;
};

} // anonymous namespace
  52. namespace android {
  53. int MtpFfsHandle::getPacketSize(int ffs_fd) {
  54. struct usb_endpoint_descriptor desc;
  55. if (ioctl(ffs_fd, FUNCTIONFS_ENDPOINT_DESC, reinterpret_cast<unsigned long>(&desc))) {
  56. PLOG(ERROR) << "Could not get FFS bulk-in descriptor";
  57. return MAX_PACKET_SIZE_HS;
  58. } else {
  59. return desc.wMaxPacketSize;
  60. }
  61. }
// Takes ownership of the already-open FunctionFS control (ep0) file descriptor.
MtpFfsHandle::MtpFfsHandle(int controlFd) {
    mControl.reset(controlFd);
}
  65. MtpFfsHandle::~MtpFfsHandle() {}
  66. void MtpFfsHandle::closeEndpoints() {
  67. mIntr.reset();
  68. mBulkIn.reset();
  69. mBulkOut.reset();
  70. }
  71. bool MtpFfsHandle::openEndpoints(bool ptp) {
  72. if (mBulkIn < 0) {
  73. mBulkIn.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_IN : FFS_MTP_EP_IN, O_RDWR)));
  74. if (mBulkIn < 0) {
  75. PLOG(ERROR) << (ptp ? FFS_PTP_EP_IN : FFS_MTP_EP_IN) << ": cannot open bulk in ep";
  76. return false;
  77. }
  78. }
  79. if (mBulkOut < 0) {
  80. mBulkOut.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_OUT : FFS_MTP_EP_OUT, O_RDWR)));
  81. if (mBulkOut < 0) {
  82. PLOG(ERROR) << (ptp ? FFS_PTP_EP_OUT : FFS_MTP_EP_OUT) << ": cannot open bulk out ep";
  83. return false;
  84. }
  85. }
  86. if (mIntr < 0) {
  87. mIntr.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_INTR : FFS_MTP_EP_INTR, O_RDWR)));
  88. if (mIntr < 0) {
  89. PLOG(ERROR) << (ptp ? FFS_PTP_EP_INTR : FFS_MTP_EP_INTR) << ": cannot open intr ep";
  90. return false;
  91. }
  92. }
  93. return true;
  94. }
  95. void MtpFfsHandle::advise(int fd) {
  96. for (unsigned i = 0; i < NUM_IO_BUFS; i++) {
  97. if (posix_madvise(mIobuf[i].bufs.data(), MAX_FILE_CHUNK_SIZE,
  98. POSIX_MADV_SEQUENTIAL | POSIX_MADV_WILLNEED) < 0)
  99. PLOG(ERROR) << "Failed to madvise";
  100. }
  101. if (posix_fadvise(fd, 0, 0,
  102. POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE | POSIX_FADV_WILLNEED) < 0)
  103. PLOG(ERROR) << "Failed to fadvise";
  104. }
// Writes the USB descriptors and strings for the MTP or PTP function to the
// control endpoint; delegates to the free function in MtpDescriptors.
bool MtpFfsHandle::writeDescriptors(bool ptp) {
    return ::android::writeDescriptors(mControl, ptp);
}
// Closes the FunctionFS control (ep0) file descriptor.
void MtpFfsHandle::closeConfig() {
    mControl.reset();
}
// Synchronously transfers |len| bytes between |data| and a bulk endpoint
// (bulk-out when |read|, bulk-in otherwise) by batching AIO requests through
// mIobuf[0]. If |zero_packet| is set and |len| is a multiple of the endpoint
// packet size, a zero-length packet is sent/received to terminate the
// transfer. Returns the number of bytes transferred, or -1 on error.
int MtpFfsHandle::doAsync(void* data, size_t len, bool read, bool zero_packet) {
    struct io_event ioevs[AIO_BUFS_MAX];
    size_t total = 0;

    while (total < len) {
        size_t this_len = std::min(len - total, static_cast<size_t>(AIO_BUF_LEN * AIO_BUFS_MAX));
        int num_bufs = this_len / AIO_BUF_LEN + (this_len % AIO_BUF_LEN == 0 ? 0 : 1);
        for (int i = 0; i < num_bufs; i++) {
            // Aim the iocb buffer pointers directly into the caller's buffer
            // so no copy through the staging buffers is needed.
            mIobuf[0].buf[i] = reinterpret_cast<unsigned char*>(data) + total + i * AIO_BUF_LEN;
        }
        int ret = iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, this_len, read);
        if (ret < 0) return -1;
        ret = waitEvents(&mIobuf[0], ret, ioevs, nullptr);
        if (ret < 0) return -1;
        total += ret;
        // A short transfer ends the loop (e.g. host sent less than requested).
        if (static_cast<size_t>(ret) < this_len) break;
    }

    int packet_size = getPacketSize(read ? mBulkOut : mBulkIn);
    if (len % packet_size == 0 && zero_packet) {
        // Zero-length transfer to delimit an exact-multiple-sized payload.
        int ret = iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, 0, read);
        if (ret < 0) return -1;
        ret = waitEvents(&mIobuf[0], ret, ioevs, nullptr);
        if (ret < 0) return -1;
    }

    // Restore the iocb buffer pointers to the owned staging buffers, since
    // they were redirected into the caller's memory above.
    for (unsigned i = 0; i < AIO_BUFS_MAX; i++) {
        mIobuf[0].buf[i] = mIobuf[0].bufs.data() + i * AIO_BUF_LEN;
    }
    return total;
}
  139. int MtpFfsHandle::read(void* data, size_t len) {
  140. // Zero packets are handled by receiveFile()
  141. return doAsync(data, len, true, false);
  142. }
  143. int MtpFfsHandle::write(const void* data, size_t len) {
  144. return doAsync(const_cast<void*>(data), len, false, true);
  145. }
  146. int MtpFfsHandle::handleEvent() {
  147. std::vector<usb_functionfs_event> events(FFS_NUM_EVENTS);
  148. usb_functionfs_event *event = events.data();
  149. int nbytes = TEMP_FAILURE_RETRY(::read(mControl, event,
  150. events.size() * sizeof(usb_functionfs_event)));
  151. if (nbytes == -1) {
  152. return -1;
  153. }
  154. int ret = 0;
  155. for (size_t n = nbytes / sizeof *event; n; --n, ++event) {
  156. switch (event->type) {
  157. case FUNCTIONFS_BIND:
  158. case FUNCTIONFS_ENABLE:
  159. ret = 0;
  160. errno = 0;
  161. break;
  162. case FUNCTIONFS_UNBIND:
  163. case FUNCTIONFS_DISABLE:
  164. errno = ESHUTDOWN;
  165. ret = -1;
  166. break;
  167. case FUNCTIONFS_SETUP:
  168. if (handleControlRequest(&event->u.setup) == -1)
  169. ret = -1;
  170. break;
  171. case FUNCTIONFS_SUSPEND:
  172. case FUNCTIONFS_RESUME:
  173. break;
  174. default:
  175. LOG(ERROR) << "Mtp Event " << event->type << " (unknown)";
  176. }
  177. }
  178. return ret;
  179. }
  180. int MtpFfsHandle::handleControlRequest(const struct usb_ctrlrequest *setup) {
  181. uint8_t type = setup->bRequestType;
  182. uint8_t code = setup->bRequest;
  183. uint16_t length = setup->wLength;
  184. uint16_t index = setup->wIndex;
  185. uint16_t value = setup->wValue;
  186. std::vector<char> buf;
  187. buf.resize(length);
  188. if (!(type & USB_DIR_IN)) {
  189. if (::read(mControl, buf.data(), length) != length) {
  190. PLOG(ERROR) << "Mtp error ctrlreq read data";
  191. }
  192. }
  193. if ((type & USB_TYPE_MASK) == USB_TYPE_CLASS && index == 0 && value == 0) {
  194. switch(code) {
  195. case MTP_REQ_RESET:
  196. case MTP_REQ_CANCEL:
  197. errno = ECANCELED;
  198. return -1;
  199. // break;
  200. case MTP_REQ_GET_DEVICE_STATUS:
  201. {
  202. if (length < sizeof(struct mtp_device_status) + 4) {
  203. errno = EINVAL;
  204. return -1;
  205. }
  206. struct mtp_device_status *st = reinterpret_cast<struct mtp_device_status*>(buf.data());
  207. st->wLength = htole16(sizeof(st));
  208. if (mCanceled) {
  209. st->wLength += 4;
  210. st->wCode = MTP_RESPONSE_TRANSACTION_CANCELLED;
  211. uint16_t *endpoints = reinterpret_cast<uint16_t*>(st + 1);
  212. endpoints[0] = ioctl(mBulkIn, FUNCTIONFS_ENDPOINT_REVMAP);
  213. endpoints[1] = ioctl(mBulkOut, FUNCTIONFS_ENDPOINT_REVMAP);
  214. mCanceled = false;
  215. } else {
  216. st->wCode = MTP_RESPONSE_OK;
  217. }
  218. length = st->wLength;
  219. break;
  220. }
  221. default:
  222. LOG(ERROR) << "Unrecognized Mtp class request! " << code;
  223. }
  224. } else {
  225. LOG(ERROR) << "Unrecognized request type " << type;
  226. }
  227. if (type & USB_DIR_IN) {
  228. if (::write(mControl, buf.data(), length) != length) {
  229. PLOG(ERROR) << "Mtp error ctrlreq write data";
  230. }
  231. }
  232. return 0;
  233. }
  234. int MtpFfsHandle::start(bool ptp) {
  235. if (!openEndpoints(ptp))
  236. return -1;
  237. for (unsigned i = 0; i < NUM_IO_BUFS; i++) {
  238. mIobuf[i].bufs.resize(MAX_FILE_CHUNK_SIZE);
  239. mIobuf[i].iocb.resize(AIO_BUFS_MAX);
  240. mIobuf[i].iocbs.resize(AIO_BUFS_MAX);
  241. mIobuf[i].buf.resize(AIO_BUFS_MAX);
  242. for (unsigned j = 0; j < AIO_BUFS_MAX; j++) {
  243. mIobuf[i].buf[j] = mIobuf[i].bufs.data() + j * AIO_BUF_LEN;
  244. mIobuf[i].iocb[j] = &mIobuf[i].iocbs[j];
  245. }
  246. }
  247. memset(&mCtx, 0, sizeof(mCtx));
  248. if (io_setup(AIO_BUFS_MAX, &mCtx) < 0) {
  249. PLOG(ERROR) << "unable to setup aio";
  250. return -1;
  251. }
  252. mEventFd.reset(eventfd(0, EFD_NONBLOCK));
  253. mPollFds[0].fd = mControl;
  254. mPollFds[0].events = POLLIN;
  255. mPollFds[1].fd = mEventFd;
  256. mPollFds[1].events = POLLIN;
  257. mCanceled = false;
  258. return 0;
  259. }
// Tears down the session: destroys the AIO context, then closes the
// endpoint fds and the control fd.
void MtpFfsHandle::close() {
    io_destroy(mCtx);
    closeEndpoints();
    closeConfig();
}
// Waits until at least |min_events| AIO completions have been reaped for the
// requests submitted on |buf|, polling both ep0 (so control events like
// cancel are serviced promptly) and the AIO eventfd. Completed events are
// written into |events|; if |counter| is non-null it is incremented by the
// number of events reaped. Returns the total bytes transferred across the
// reaped events, or -1 with errno set on any error (including an error
// reported by an individual event).
int MtpFfsHandle::waitEvents(struct io_buffer *buf, int min_events, struct io_event *events,
        int *counter) {
    int num_events = 0;
    int ret = 0;
    int error = 0;

    while (num_events < min_events) {
        if (poll(mPollFds, 2, POLL_TIMEOUT_MS) == -1) {
            PLOG(ERROR) << "Mtp error during poll()";
            return -1;
        }
        if (mPollFds[0].revents & POLLIN) {
            mPollFds[0].revents = 0;
            // A control event (e.g. cancel/disable) arrived; record the error
            // but keep reaping so submitted requests are accounted for.
            if (handleEvent() == -1) {
                error = errno;
            }
        }
        if (mPollFds[1].revents & POLLIN) {
            mPollFds[1].revents = 0;
            uint64_t ev_cnt = 0;

            if (::read(mEventFd, &ev_cnt, sizeof(ev_cnt)) == -1) {
                PLOG(ERROR) << "Mtp unable to read eventfd";
                error = errno;
                continue;
            }

            // It's possible that io_getevents will return more events than the eventFd reported,
            // since events may appear in the time between the calls. In this case, the eventFd will
            // show up as readable next iteration, but there will be fewer or no events to actually
            // wait for. Thus we never want io_getevents to block.
            int this_events = TEMP_FAILURE_RETRY(io_getevents(mCtx, 0, AIO_BUFS_MAX, events, &ZERO_TIMEOUT));
            if (this_events == -1) {
                PLOG(ERROR) << "Mtp error getting events";
                error = errno;
            }
            // Add up the total amount of data and find errors on the way.
            for (unsigned j = 0; j < static_cast<unsigned>(this_events); j++) {
                if (events[j].res < 0) {
                    // Negative res carries the per-request error code.
                    errno = -events[j].res;
                    PLOG(ERROR) << "Mtp got error event at " << j << " and " << buf->actual << " total";
                    error = errno;
                }
                ret += events[j].res;
            }
            num_events += this_events;
            if (counter)
                *counter += this_events;
        }
        if (error) {
            errno = error;
            ret = -1;
            break;
        }
    }
    return ret;
}
// Aborts the current MTP transaction from the device side.
// Device cancels by stalling both bulk endpoints: a zero-length read/write on
// a FunctionFS endpoint fd is the stall request and is expected to "fail"
// with EBADMSG — any other outcome is a real error.
void MtpFfsHandle::cancelTransaction() {
    if (::read(mBulkIn, nullptr, 0) != -1 || errno != EBADMSG)
        PLOG(ERROR) << "Mtp stall failed on bulk in";
    if (::write(mBulkOut, nullptr, 0) != -1 || errno != EBADMSG)
        PLOG(ERROR) << "Mtp stall failed on bulk out";
    // Remember the cancel so the next GET_DEVICE_STATUS reports it.
    mCanceled = true;
    errno = ECANCELED;
}
// Cancels the in-flight AIO requests iocb[start..end) and reaps their
// completion events so the ring and eventfd stay consistent.
// Returns 0 on success, -1 (errno EIO) if any request could not be cancelled
// or its event could not be collected.
int MtpFfsHandle::cancelEvents(struct iocb **iocb, struct io_event *events, unsigned start,
        unsigned end) {
    // Some manpages for io_cancel are out of date and incorrect.
    // io_cancel will return -EINPROGRESS on success and does
    // not place the event in the given memory. We have to use
    // io_getevents to wait for all the events we cancelled.
    int ret = 0;
    unsigned num_events = 0;
    int save_errno = errno;
    errno = 0;

    for (unsigned j = start; j < end; j++) {
        if (io_cancel(mCtx, iocb[j], nullptr) != -1 || errno != EINPROGRESS) {
            PLOG(ERROR) << "Mtp couldn't cancel request " << j;
        } else {
            num_events++;
        }
    }
    if (num_events != end - start) {
        ret = -1;
        errno = EIO;
    }
    // Blocking wait for exactly the events we managed to cancel.
    int evs = TEMP_FAILURE_RETRY(io_getevents(mCtx, num_events, AIO_BUFS_MAX, events, nullptr));
    if (static_cast<unsigned>(evs) != num_events) {
        PLOG(ERROR) << "Mtp couldn't cancel all requests, got " << evs;
        ret = -1;
    }

    // Drain the eventfd notification corresponding to the reaped events.
    uint64_t ev_cnt = 0;
    if (num_events && ::read(mEventFd, &ev_cnt, sizeof(ev_cnt)) == -1)
        PLOG(ERROR) << "Mtp Unable to read event fd";

    if (ret == 0) {
        // Restore errno since it probably got overriden with EINPROGRESS.
        errno = save_errno;
    }
    return ret;
}
// Prepares and submits up to AIO_BUFS_MAX read/write iocbs on |fd| covering
// |length| bytes of |buf| (AIO_BUF_LEN bytes per iocb; a |length| of 0
// submits a single zero-length request, used for zero-length packets).
// Each iocb signals completion through mEventFd. Sets buf->actual to the
// number of requests submitted and returns it, or -1 with errno on failure.
int MtpFfsHandle::iobufSubmit(struct io_buffer *buf, int fd, unsigned length, bool read) {
    int ret = 0;
    buf->actual = AIO_BUFS_MAX;
    for (unsigned j = 0; j < AIO_BUFS_MAX; j++) {
        unsigned rq_length = std::min(AIO_BUF_LEN, length - AIO_BUF_LEN * j);
        io_prep(buf->iocb[j], fd, buf->buf[j], rq_length, 0, read);
        // Route completion notifications to the eventfd polled in waitEvents.
        buf->iocb[j]->aio_flags |= IOCB_FLAG_RESFD;
        buf->iocb[j]->aio_resfd = mEventFd;

        // Not enough data, so table is truncated.
        if (rq_length < AIO_BUF_LEN || length == AIO_BUF_LEN * (j + 1)) {
            buf->actual = j + 1;
            break;
        }
    }

    ret = io_submit(mCtx, buf->actual, buf->iocb.data());
    if (ret != static_cast<int>(buf->actual)) {
        PLOG(ERROR) << "Mtp io_submit got " << ret << " expected " << buf->actual;
        if (ret != -1) {
            // Partial submission: report as an I/O error.
            errno = EIO;
        }
        ret = -1;
    }
    return ret;
}
// Receives a file from the host over the bulk-out endpoint and writes it to
// mfr.fd at mfr.offset. Double-buffered pipeline: while one io_buffer is
// being filled by USB AIO reads, the previously filled one is flushed to
// disk with a POSIX aio_write. A transfer advertised as MAX_MTP_FILE_SIZE
// (>=4 GiB) ends on a short USB packet instead of a byte count. If
// |zero_packet| is set (or the final chunk is an exact packet multiple), a
// trailing zero-length packet is consumed. Returns 0 on success, -1 with
// errno set on error (disk write failures also cancel the USB transaction).
int MtpFfsHandle::receiveFile(mtp_file_range mfr, bool zero_packet) {
    // When receiving files, the incoming length is given in 32 bits.
    // A >=4G file is given as 0xFFFFFFFF
    uint32_t file_length = mfr.length;
    uint64_t offset = mfr.offset;

    struct aiocb aio;
    aio.aio_fildes = mfr.fd;
    aio.aio_buf = nullptr;
    struct aiocb *aiol[] = {&aio};

    int ret = -1;
    unsigned i = 0;           // index of the io_buffer currently receiving
    size_t length;
    struct io_event ioevs[AIO_BUFS_MAX];
    bool has_write = false;   // a disk aio_write is outstanding
    bool error = false;
    bool write_error = false;
    int packet_size = getPacketSize(mBulkOut);
    bool short_packet = false;
    advise(mfr.fd);

    // Break down the file into pieces that fit in buffers
    while (file_length > 0 || has_write) {
        // Queue an asynchronous read from USB.
        if (file_length > 0) {
            length = std::min(static_cast<uint32_t>(MAX_FILE_CHUNK_SIZE), file_length);
            if (iobufSubmit(&mIobuf[i], mBulkOut, length, true) == -1)
                error = true;
        }

        // Get the return status of the last write request.
        if (has_write) {
            aio_suspend(aiol, 1, nullptr);
            int written = aio_return(&aio);
            if (static_cast<size_t>(written) < aio.aio_nbytes) {
                errno = written == -1 ? aio_error(&aio) : EIO;
                PLOG(ERROR) << "Mtp error writing to disk";
                write_error = true;
            }
            has_write = false;
        }

        if (error) {
            return -1;
        }

        // Get the result of the read request, and queue a write to disk.
        if (file_length > 0) {
            unsigned num_events = 0;
            ret = 0;
            unsigned short_i = mIobuf[i].actual;
            while (num_events < short_i) {
                // Get all events up to the short read, if there is one.
                // We must wait for each event since data transfer could end at any time.
                int this_events = 0;
                int event_ret = waitEvents(&mIobuf[i], 1, ioevs, &this_events);
                num_events += this_events;

                if (event_ret == -1) {
                    cancelEvents(mIobuf[i].iocb.data(), ioevs, num_events, mIobuf[i].actual);
                    return -1;
                }
                ret += event_ret;
                for (int j = 0; j < this_events; j++) {
                    // struct io_event contains a pointer to the associated struct iocb as a __u64.
                    if (static_cast<__u64>(ioevs[j].res) <
                            reinterpret_cast<struct iocb*>(ioevs[j].obj)->aio_nbytes) {
                        // We've found a short event. Store the index since
                        // events won't necessarily arrive in the order they are queued.
                        short_i = (ioevs[j].obj - reinterpret_cast<uint64_t>(mIobuf[i].iocbs.data()))
                            / sizeof(struct iocb) + 1;
                        short_packet = true;
                    }
                }
            }
            if (short_packet) {
                // A short read terminates the transfer: cancel any requests
                // queued after the short one.
                if (cancelEvents(mIobuf[i].iocb.data(), ioevs, short_i, mIobuf[i].actual)) {
                    write_error = true;
                }
            }
            if (file_length == MAX_MTP_FILE_SIZE) {
                // For larger files, receive until a short packet is received.
                if (static_cast<size_t>(ret) < length) {
                    file_length = 0;
                }
            } else if (ret < static_cast<int>(length)) {
                // If file is less than 4G and we get a short packet, it's an error.
                errno = EIO;
                LOG(ERROR) << "Mtp got unexpected short packet";
                return -1;
            } else {
                file_length -= ret;
            }
            if (write_error) {
                cancelTransaction();
                return -1;
            }

            // Enqueue a new write request
            aio_prepare(&aio, mIobuf[i].bufs.data(), ret, offset);
            aio_write(&aio);

            offset += ret;
            i = (i + 1) % NUM_IO_BUFS;  // flip to the other buffer
            has_write = true;
        }
    }
    if ((ret % packet_size == 0 && !short_packet) || zero_packet) {
        // Receive an empty packet if size is a multiple of the endpoint size
        // and we didn't already get an empty packet from the header or large file.
        if (read(mIobuf[0].bufs.data(), packet_size) != 0) {
            return -1;
        }
    }
    return 0;
}
// Sends the file described by |mfr| to the host over the bulk-in endpoint,
// prefixed with an MTP data header. The first USB packet carries the header
// plus initial file bytes (some hosts reject header/data separation); the
// remainder is pipelined: a POSIX aio_read fills one buffer from disk while
// the other buffer's USB AIO writes complete. The advertised length is
// clamped to MAX_MTP_FILE_SIZE for >=4 GiB files. Ends with a zero-length
// packet when the last chunk is an exact packet multiple.
// Returns 0 on success, -1 with errno set on error.
int MtpFfsHandle::sendFile(mtp_file_range mfr) {
    uint64_t file_length = mfr.length;
    uint32_t given_length = std::min(static_cast<uint64_t>(MAX_MTP_FILE_SIZE),
            file_length + sizeof(mtp_data_header));
    uint64_t offset = mfr.offset;
    int packet_size = getPacketSize(mBulkIn);

    // If file_length is larger than a size_t, truncating would produce the wrong comparison.
    // Instead, promote the left side to 64 bits, then truncate the small result.
    int init_read_len = std::min(
            static_cast<uint64_t>(packet_size - sizeof(mtp_data_header)), file_length);

    advise(mfr.fd);

    struct aiocb aio;
    aio.aio_fildes = mfr.fd;
    struct aiocb *aiol[] = {&aio};
    int ret = 0;              // bytes queued on USB in the last round
    int length, num_read;
    unsigned i = 0;           // index of the io_buffer being staged
    struct io_event ioevs[AIO_BUFS_MAX];
    bool error = false;
    bool has_write = false;   // a USB AIO write batch is outstanding

    // Send the header data
    mtp_data_header *header = reinterpret_cast<mtp_data_header*>(mIobuf[0].bufs.data());
    header->length = htole32(given_length);
    header->type = htole16(2); // data packet
    header->command = htole16(mfr.command);
    header->transaction_id = htole32(mfr.transaction_id);

    // Some hosts don't support header/data separation even though MTP allows it
    // Handle by filling first packet with initial file data
    if (TEMP_FAILURE_RETRY(pread(mfr.fd, mIobuf[0].bufs.data() +
                    sizeof(mtp_data_header), init_read_len, offset))
            != init_read_len) return -1;
    if (doAsync(mIobuf[0].bufs.data(), sizeof(mtp_data_header) + init_read_len,
                false, false /* zlps are handled below */) == -1)
        return -1;
    file_length -= init_read_len;
    offset += init_read_len;
    ret = init_read_len + sizeof(mtp_data_header);

    // Break down the file into pieces that fit in buffers
    while(file_length > 0 || has_write) {
        if (file_length > 0) {
            // Queue up a read from disk.
            length = std::min(static_cast<uint64_t>(MAX_FILE_CHUNK_SIZE), file_length);
            aio_prepare(&aio, mIobuf[i].bufs.data(), length, offset);
            aio_read(&aio);
        }

        if (has_write) {
            // Wait for usb write. Cancel unwritten portion if there's an error.
            int num_events = 0;
            if (waitEvents(&mIobuf[(i-1)%NUM_IO_BUFS], mIobuf[(i-1)%NUM_IO_BUFS].actual, ioevs,
                        &num_events) != ret) {
                error = true;
                cancelEvents(mIobuf[(i-1)%NUM_IO_BUFS].iocb.data(), ioevs, num_events,
                        mIobuf[(i-1)%NUM_IO_BUFS].actual);
            }
            has_write = false;
        }

        if (file_length > 0) {
            // Wait for the previous read to finish
            aio_suspend(aiol, 1, nullptr);
            num_read = aio_return(&aio);
            if (static_cast<size_t>(num_read) < aio.aio_nbytes) {
                errno = num_read == -1 ? aio_error(&aio) : EIO;
                PLOG(ERROR) << "Mtp error reading from disk";
                cancelTransaction();
                return -1;
            }

            file_length -= num_read;
            offset += num_read;

            if (error) {
                return -1;
            }

            // Queue up a write to usb.
            if (iobufSubmit(&mIobuf[i], mBulkIn, num_read, false) == -1) {
                return -1;
            }
            has_write = true;
            ret = num_read;
        }

        i = (i + 1) % NUM_IO_BUFS;  // flip to the other buffer
    }

    if (ret % packet_size == 0) {
        // If the last packet wasn't short, send a final empty packet
        if (write(mIobuf[0].bufs.data(), 0) != 0) {
            return -1;
        }
    }
    return 0;
}
// Queues an MTP event for delivery on the interrupt endpoint.
// Mimic the behavior of f_mtp by sending the event async.
// Events aren't critical to the connection, so we don't need to check the return value.
int MtpFfsHandle::sendEvent(mtp_event me) {
    // Copy the payload: the caller's buffer may be freed before the detached
    // thread runs. Ownership of |temp| transfers to doSendEvent, which
    // delete[]s it after the write.
    char *temp = new char[me.length];
    memcpy(temp, me.data, me.length);
    me.data = temp;
    std::thread t([this, me]() { return this->doSendEvent(me); });
    t.detach();
    return 0;
}
// Writes the event payload to the interrupt endpoint (runs on the detached
// thread spawned by sendEvent) and frees the heap copy sendEvent allocated.
void MtpFfsHandle::doSendEvent(mtp_event me) {
    unsigned length = me.length;
    int ret = ::write(mIntr, me.data, length);
    if (static_cast<unsigned>(ret) != length)
        PLOG(ERROR) << "Mtp error sending event thread!";
    delete[] reinterpret_cast<char*>(me.data);
}
  600. } // namespace android