TransposeConv2D.cpp

/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "CpuOperationUtils.h"
#include "OperationResolver.h"

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <cstring>
#include <memory>
#include <mutex>
#include <vector>

#include "Tracing.h"
#include "tensorflow/lite/kernels/internal/common.h"

namespace android {
namespace nn {
namespace transpose_conv_2d {

constexpr char kOperationName[] = "TRANSPOSE_CONV_2D";

constexpr uint32_t kInputTensor = 0;
constexpr uint32_t kFilterTensor = 1;
constexpr uint32_t kBiasTensor = 2;

constexpr uint32_t kNumOutputs = 1;
constexpr uint32_t kOutputTensor = 0;

namespace {

// If possible we will use this static buffer for the tensor.
constexpr size_t kStaticBufferSize = 1605632;
char static_scratch_buffer[kStaticBufferSize];
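// Size note (an observation, not a documented spec): 1605632 bytes
// = 224 * 224 * 32, i.e. enough for a 224x224 feature map at 32 bytes
// (eight int32 accumulators) per pixel.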

// executionMutex is used to protect concurrent access of the static_scratch_buffer.
// std::mutex is safe for pthreads on Android.
std::mutex executionMutex;

struct TransposeConv2dParam {
    int32_t paddingLeft, paddingRight;
    int32_t paddingTop, paddingBottom;
    int32_t strideWidth, strideHeight;
    int32_t activation;
    bool useNchw = false;

    bool initialize(const IOperationExecutionContext* context) {
        uint32_t inCount = context->getNumInputs();
        int32_t paddingImplicit = 0;
        if (inCount == 9) {
            paddingImplicit = context->getInputValue<int32_t>(4);
            strideWidth = context->getInputValue<int32_t>(5);
            strideHeight = context->getInputValue<int32_t>(6);
            activation = context->getInputValue<int32_t>(7);
            useNchw = context->getInputValue<bool>(8);
            Shape filterShape = context->getInputShape(kFilterTensor);
            int32_t filterWidth = getSizeOfDimension(filterShape, 2);
            int32_t filterHeight = getSizeOfDimension(filterShape, 1);
            NN_RET_CHECK_EQ(getNumberOfDimensions(context->getInputShape(3)), 1);
            NN_RET_CHECK_EQ(getSizeOfDimension(context->getInputShape(3), 0), 4);
            const int32_t* outputShapeData = context->getInputBuffer<int32_t>(3);
            int32_t outputWidth = useNchw ? outputShapeData[3] : outputShapeData[2];
            int32_t outputHeight = useNchw ? outputShapeData[2] : outputShapeData[1];
            calculateExplicitPaddingTransposeConv(outputWidth, strideWidth, filterWidth,
                                                  paddingImplicit, &paddingLeft, &paddingRight);
            calculateExplicitPaddingTransposeConv(outputHeight, strideHeight, filterHeight,
                                                  paddingImplicit, &paddingTop, &paddingBottom);
        } else if (inCount == 11) {
            paddingLeft = context->getInputValue<int32_t>(3);
            paddingRight = context->getInputValue<int32_t>(4);
            paddingTop = context->getInputValue<int32_t>(5);
            paddingBottom = context->getInputValue<int32_t>(6);
            strideWidth = context->getInputValue<int32_t>(7);
            strideHeight = context->getInputValue<int32_t>(8);
            activation = context->getInputValue<int32_t>(9);
            useNchw = context->getInputValue<bool>(10);
        } else {
            NN_RET_CHECK_FAIL() << "Unsupported input spec for operation " << kOperationName;
        }
        // paddingRight and paddingBottom in transpose conv may be less than 0 to resolve the
        // ambiguous output shape issue in the case of stride > 1.
        NN_RET_CHECK_GE(paddingLeft, 0);
        NN_RET_CHECK_GE(paddingTop, 0);
        NN_RET_CHECK_GT(strideWidth, 0);
        NN_RET_CHECK_GT(strideHeight, 0);
        NN_RET_CHECK_GE(activation, 0);
        return true;
    }
};
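
// For the 9-input (implicit padding) signature, initialize() above derives explicit
// left/right and top/bottom padding from the caller-provided output shape, stride, and
// filter size via calculateExplicitPaddingTransposeConv. The tail padding
// (paddingRight/paddingBottom) may legitimately come out negative when stride > 1,
// which is why only paddingLeft and paddingTop are range-checked.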

#define ANDROID_NN_TRANSPOSE_CONV_PARAMETERS                                    \
    uint32_t numBatches = getSizeOfDimension(inputShape, 0);                    \
    uint32_t inputHeight = getSizeOfDimension(inputShape, 1);                   \
    uint32_t inputWidth = getSizeOfDimension(inputShape, 2);                    \
    uint32_t inputDepth = getSizeOfDimension(inputShape, 3);                    \
    uint32_t filterHeight = getSizeOfDimension(filterShape, 1);                 \
    uint32_t filterWidth = getSizeOfDimension(filterShape, 2);                  \
    uint32_t outputHeight = getSizeOfDimension(outputShape, 1);                 \
    uint32_t outputWidth = getSizeOfDimension(outputShape, 2);                  \
    uint32_t outputDepth = getSizeOfDimension(outputShape, 3);                  \
    int32_t paddingLeft = param.paddingLeft, paddingRight = param.paddingRight; \
    int32_t paddingTop = param.paddingTop, paddingBottom = param.paddingBottom; \
    int32_t strideWidth = param.strideWidth, strideHeight = param.strideHeight; \
    int32_t activation = param.activation;
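
// The NHWC kernels below implement transposed convolution as a scatter: each input
// element at (h, w) adds its value, weighted by the filter, into the output window
// whose top-left corner is (h * strideHeight - paddingTop, w * strideWidth - paddingLeft).
// The output size relation is the inverse of CONV_2D's (see computeOutSizeTransposeConv
// in prepare()):
//   outSize = (inSize - 1) * stride + filterSize - paddingHead - paddingTail
// Worked example: inSize = 2, stride = 2, filterSize = 3, padding = 0 gives
// outSize = (2 - 1) * 2 + 3 = 5; the two input positions scatter to output ranges
// [0, 2] and [2, 4], overlapping at index 2.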
bool transposeConvNhwc(const float* inputData, const Shape& inputShape, const float* filterData,
                       const Shape& filterShape, const float* biasData, const Shape& biasShape,
                       const TransposeConv2dParam& param, float* outputData,
                       const Shape& outputShape) {
    NNTRACE_TRANS("transposeConvFloat32");
    ANDROID_NN_TRANSPOSE_CONV_PARAMETERS

    float outputActivationMin = 0.0f, outputActivationMax = 0.0f;
    CalculateActivationRangeFloat(activation, &outputActivationMin, &outputActivationMax);

    memset(outputData, 0, getNumberOfElements(outputShape) * sizeof(float));

    const float* inputBase = inputData;
    float* outputBase = outputData;
    for (uint32_t b = 0; b < numBatches; b++) {
        for (uint32_t h = 0; h < inputHeight; h++) {
            for (uint32_t w = 0; w < inputWidth; w++) {
                int32_t wOutputOrigin = static_cast<int32_t>(w) * strideWidth - paddingLeft;
                int32_t hOutputOrigin = static_cast<int32_t>(h) * strideHeight - paddingTop;

                const float* filterBase = filterData;
                for (uint32_t k = 0; k < outputDepth; k++) {
                    for (uint32_t i = 0; i < filterHeight; i++) {
                        for (uint32_t j = 0; j < filterWidth; j++, filterBase += inputDepth) {
                            int32_t hOutput = hOutputOrigin + static_cast<int32_t>(i);
                            int32_t wOutput = wOutputOrigin + static_cast<int32_t>(j);
                            if (hOutput >= 0 && hOutput < static_cast<int32_t>(outputHeight) &&
                                wOutput >= 0 && wOutput < static_cast<int32_t>(outputWidth)) {
                                uint32_t outputIndex = hOutput * outputWidth * outputDepth +
                                                       wOutput * outputDepth + k;
                                for (uint32_t d = 0; d < inputDepth; d++) {
                                    outputBase[outputIndex] += inputBase[d] * filterBase[d];
                                }
                            }
                        }
                    }
                }
                inputBase += inputDepth;
            }
        }
        outputBase += outputHeight * outputWidth * outputDepth;
    }

    const uint32_t outerSize = numBatches * outputHeight * outputWidth;
    float* outPtr = outputData;
    for (uint32_t i = 0; i < outerSize; i++) {
        for (uint32_t d = 0; d < outputDepth; d++, outPtr++) {
            *outPtr += biasData[d];
            *outPtr = std::max(std::min(*outPtr, outputActivationMax), outputActivationMin);
        }
    }
    return true;
}
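
// Quantized variant of the same scatter. Accumulation happens in int32 with the
// zero points (offsets) folded in per element; the accumulator is then requantized
// with realMultiplier = inputScale * filterScale / outputScale, which
// QuantizeMultiplier expresses as a fixed-point multiplier plus a power-of-two
// exponent, before the output zero point is added back and the result is clamped
// to the activation range.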
bool transposeConvNhwc(const uint8_t* inputData, const Shape& inputShape, const uint8_t* filterData,
                       const Shape& filterShape, const int32_t* biasData, const Shape& biasShape,
                       const TransposeConv2dParam& param, uint8_t* outputData,
                       const Shape& outputShape) {
    NNTRACE_TRANS("transposeConvQuant8");
    ANDROID_NN_TRANSPOSE_CONV_PARAMETERS

    int32_t* tempBuffer = nullptr;
    std::unique_ptr<int32_t[]> bufferGuard;
    uint32_t tempBufferByteSize = getNumberOfElements(outputShape) * sizeof(int32_t);
    if (tempBufferByteSize <= kStaticBufferSize) {
        tempBuffer = reinterpret_cast<int32_t*>(static_scratch_buffer);
    } else {
        tempBuffer = new (std::nothrow) int32_t[tempBufferByteSize / sizeof(int32_t)];
        if (tempBuffer == nullptr) {
            LOG(ERROR) << "ConvTranspose size is too large, not enough memory";
            return false;
        }
        bufferGuard.reset(tempBuffer);
    }

    int32_t inputOffset = -inputShape.offset;
    int32_t filterOffset = -filterShape.offset;
    int32_t outputOffset = outputShape.offset;

    double realMultiplier = 0.0;
    int32_t outputMultiplier = 0;
    int32_t outputShift = 0;
    NN_RET_CHECK(GetQuantizedConvolutionMultipler(inputShape, filterShape, biasShape, outputShape,
                                                  &realMultiplier));
    int exponent;
    NN_RET_CHECK(QuantizeMultiplier(realMultiplier, &outputMultiplier, &exponent));
    outputShift = -exponent;

    int32_t outputActivationMin = 0, outputActivationMax = 0;
    CalculateActivationRangeUint8(activation, outputShape, &outputActivationMin,
                                  &outputActivationMax);

    // Prevent concurrent executions that may access the scratch buffer.
    std::unique_lock<std::mutex> lock(executionMutex);
    memset(tempBuffer, 0, tempBufferByteSize);

    const uint8_t* inputPtr = inputData;
    int32_t* outputBase = tempBuffer;
    for (uint32_t b = 0; b < numBatches; b++) {
        for (uint32_t h = 0; h < inputHeight; h++) {
            for (uint32_t w = 0; w < inputWidth; w++) {
                for (uint32_t d = 0; d < inputDepth; d++) {
                    int32_t wOutputOrigin = static_cast<int32_t>(w) * strideWidth - paddingLeft;
                    int32_t hOutputOrigin = static_cast<int32_t>(h) * strideHeight - paddingTop;

                    for (uint32_t i = 0; i < filterHeight; i++) {
                        for (uint32_t j = 0; j < filterWidth; j++) {
                            for (uint32_t k = 0; k < outputDepth; k++) {
                                int32_t hOutput = hOutputOrigin + static_cast<int32_t>(i);
                                int32_t wOutput = wOutputOrigin + static_cast<int32_t>(j);
                                if (hOutput >= 0 && hOutput < static_cast<int32_t>(outputHeight) &&
                                    wOutput >= 0 && wOutput < static_cast<int32_t>(outputWidth)) {
                                    uint32_t filterIndex =
                                            k * filterHeight * filterWidth * inputDepth +
                                            i * filterWidth * inputDepth + j * inputDepth + d;
                                    uint32_t outputIndex = hOutput * outputWidth * outputDepth +
                                                           wOutput * outputDepth + k;
                                    outputBase[outputIndex] +=
                                            (static_cast<int32_t>(*inputPtr) + inputOffset) *
                                            (static_cast<int32_t>(filterData[filterIndex]) +
                                             filterOffset);
                                }
                            }
                        }
                    }
                    inputPtr++;
                }
            }
        }
        outputBase += outputHeight * outputWidth * outputDepth;
    }

    const uint32_t outerSize = numBatches * outputHeight * outputWidth;
    int32_t* bufferPtr = tempBuffer;
    uint8_t* outPtr = outputData;
    for (uint32_t i = 0; i < outerSize; i++) {
        for (uint32_t d = 0; d < outputDepth; d++, bufferPtr++, outPtr++) {
            int32_t outVal = *bufferPtr + biasData[d];
            outVal = tflite::MultiplyByQuantizedMultiplier(outVal, outputMultiplier, -outputShift);
            outVal += outputOffset;
            outVal = std::max(std::min(outVal, outputActivationMax), outputActivationMin);
            *outPtr = static_cast<uint8_t>(outVal);
        }
    }
    return true;
}

bool transposeConvNhwc(const _Float16* inputData, const Shape& inputShape,
                       const _Float16* filterData, const Shape& filterShape,
                       const _Float16* biasData, const Shape& biasShape,
                       const TransposeConv2dParam& param, _Float16* outputData,
                       const Shape& outputShape) {
    NNTRACE_TRANS("transposeConvFloat16");
    std::vector<float> inputData_float32(getNumberOfElements(inputShape));
    std::vector<float> filterData_float32(getNumberOfElements(filterShape));
    std::vector<float> biasData_float32(getNumberOfElements(biasShape));
    std::vector<float> outputData_float32(getNumberOfElements(outputShape));

    convertFloat16ToFloat32(inputData, &inputData_float32);
    convertFloat16ToFloat32(filterData, &filterData_float32);
    convertFloat16ToFloat32(biasData, &biasData_float32);

    transposeConvNhwc(inputData_float32.data(), inputShape, filterData_float32.data(), filterShape,
                      biasData_float32.data(), biasShape, param, outputData_float32.data(),
                      outputShape);
    convertFloat32ToFloat16(outputData_float32, outputData);
    return true;
}
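
// InputWithLayout/OutputWithLayout (helpers declared in CpuOperationUtils.h) handle the
// optional NCHW layout: the input is transposed to NHWC before the kernel runs and the
// result is transposed back in commit(), so the kernels above only ever see NHWC data.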
template <typename T_Input, typename T_Filter, typename T_Bias>
bool transposeConv(const T_Input* inputData, const Shape& inputShape, const T_Filter* filterData,
                   const Shape& filterShape, const T_Bias* biasData, const Shape& biasShape,
                   const TransposeConv2dParam& param, T_Input* outputData,
                   const Shape& outputShape) {
    InputWithLayout<T_Input> input(param.useNchw);
    OutputWithLayout<T_Input> output(param.useNchw);
    NN_RET_CHECK(input.initialize(inputData, inputShape));
    NN_RET_CHECK(output.initialize(outputData, outputShape));
    NN_RET_CHECK(transposeConvNhwc(input.getNhwcBuffer(), input.getNhwcShape(), filterData,
                                   filterShape, biasData, biasShape, param, output.getNhwcBuffer(),
                                   output.getNhwcShape()));
    NN_RET_CHECK(output.commit());
    return true;
}
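
// Per-channel quantized variant: the filter uses symmetric per-channel quantization
// (zero point 0, hence no filterOffset term below), and each output channel gets its
// own requantization multiplier/shift computed from
// filterScales[channel] * inputScale / outputScale.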
bool transposeConvQuant8PerChannelNhwc(const uint8_t* inputData, const Shape& inputShape,
                                       const int8_t* filterData, const Shape& filterShape,
                                       const float* filterScales, const int32_t* biasData,
                                       const Shape& biasShape, const TransposeConv2dParam& param,
                                       uint8_t* outputData, const Shape& outputShape) {
    NNTRACE_TRANS("transposeConvQuant8PerChannel");
    ANDROID_NN_TRANSPOSE_CONV_PARAMETERS

    int32_t* tempBuffer = nullptr;
    std::unique_ptr<int32_t[]> bufferGuard;
    uint32_t tempBufferByteSize = getNumberOfElements(outputShape) * sizeof(int32_t);
    if (tempBufferByteSize <= kStaticBufferSize) {
        tempBuffer = reinterpret_cast<int32_t*>(static_scratch_buffer);
    } else {
        tempBuffer = new (std::nothrow) int32_t[tempBufferByteSize / sizeof(int32_t)];
        if (tempBuffer == nullptr) {
            LOG(ERROR) << "ConvTranspose size is too large, not enough memory";
            return false;
        }
        bufferGuard.reset(tempBuffer);
    }

    int32_t inputOffset = -inputShape.offset;
    int32_t outputOffset = outputShape.offset;

    std::vector<double> realMultiplier(outputDepth, 0.0);
    std::vector<int32_t> outputMultiplier(outputDepth, 0);
    std::vector<int32_t> outputShift(outputDepth, 0);
    for (uint32_t i = 0; i < outputDepth; ++i) {
        Shape filterChannelShape = filterShape;
        filterChannelShape.scale = filterScales[i];
        Shape biasChannelShape = biasShape;
        biasChannelShape.scale = filterScales[i] * inputShape.scale;
        NN_RET_CHECK(GetQuantizedConvolutionMultipler(
                inputShape, filterChannelShape, biasChannelShape, outputShape, &realMultiplier[i]));
        int exponent;
        NN_RET_CHECK(QuantizeMultiplier(realMultiplier[i], &outputMultiplier[i], &exponent));
        outputShift[i] = -exponent;
    }

    int32_t outputActivationMin = 0, outputActivationMax = 0;
    CalculateActivationRangeUint8(activation, outputShape, &outputActivationMin,
                                  &outputActivationMax);

    // Prevent concurrent executions that may access the scratch buffer.
    std::unique_lock<std::mutex> lock(executionMutex);
    memset(tempBuffer, 0, tempBufferByteSize);

    const uint8_t* inputPtr = inputData;
    int32_t* outputBase = tempBuffer;
    for (uint32_t b = 0; b < numBatches; b++) {
        for (uint32_t h = 0; h < inputHeight; h++) {
            for (uint32_t w = 0; w < inputWidth; w++) {
                for (uint32_t d = 0; d < inputDepth; d++) {
                    int32_t wOutputOrigin = static_cast<int32_t>(w) * strideWidth - paddingLeft;
                    int32_t hOutputOrigin = static_cast<int32_t>(h) * strideHeight - paddingTop;

                    for (uint32_t i = 0; i < filterHeight; i++) {
                        for (uint32_t j = 0; j < filterWidth; j++) {
                            for (uint32_t k = 0; k < outputDepth; k++) {
                                int32_t hOutput = hOutputOrigin + static_cast<int32_t>(i);
                                int32_t wOutput = wOutputOrigin + static_cast<int32_t>(j);
                                if (hOutput >= 0 && hOutput < static_cast<int32_t>(outputHeight) &&
                                    wOutput >= 0 && wOutput < static_cast<int32_t>(outputWidth)) {
                                    uint32_t filterIndex =
                                            k * filterHeight * filterWidth * inputDepth +
                                            i * filterWidth * inputDepth + j * inputDepth + d;
                                    uint32_t outputIndex = hOutput * outputWidth * outputDepth +
                                                           wOutput * outputDepth + k;
                                    outputBase[outputIndex] +=
                                            (static_cast<int32_t>(*inputPtr) + inputOffset) *
                                            static_cast<int32_t>(filterData[filterIndex]);
                                }
                            }
                        }
                    }
                    inputPtr++;
                }
            }
        }
        outputBase += outputHeight * outputWidth * outputDepth;
    }

    const uint32_t outerSize = numBatches * outputHeight * outputWidth;
    int32_t* bufferPtr = tempBuffer;
    uint8_t* outPtr = outputData;
    for (uint32_t i = 0; i < outerSize; i++) {
        for (uint32_t d = 0; d < outputDepth; d++, bufferPtr++, outPtr++) {
            int32_t outVal = *bufferPtr + biasData[d];
            outVal = tflite::MultiplyByQuantizedMultiplier(outVal, outputMultiplier[d],
                                                           -outputShift[d]);
            outVal += outputOffset;
            outVal = std::max(std::min(outVal, outputActivationMax), outputActivationMin);
            *outPtr = static_cast<uint8_t>(outVal);
        }
    }
    return true;
}

bool transposeConvQuant8PerChannel(const uint8_t* inputData, const Shape& inputShape,
                                   const int8_t* filterData, const Shape& filterShape,
                                   const float* filterScales, const int32_t* biasData,
                                   const Shape& biasShape, const TransposeConv2dParam& param,
                                   uint8_t* outputData, const Shape& outputShape) {
    InputWithLayout<uint8_t> input(param.useNchw);
    OutputWithLayout<uint8_t> output(param.useNchw);
    NN_RET_CHECK(input.initialize(inputData, inputShape));
    NN_RET_CHECK(output.initialize(outputData, outputShape));
    NN_RET_CHECK(transposeConvQuant8PerChannelNhwc(
            input.getNhwcBuffer(), input.getNhwcShape(), filterData, filterShape, filterScales,
            biasData, biasShape, param, output.getNhwcBuffer(), output.getNhwcShape()));
    NN_RET_CHECK(output.commit());
    return true;
}

#undef ANDROID_NN_TRANSPOSE_CONV_PARAMETERS

}  // namespace
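
// TRANSPOSE_CONV_2D accepts two input signatures (see TransposeConv2dParam::initialize):
//   11 inputs, explicit padding: {input, filter, bias, padLeft, padRight, padTop,
//       padBottom, strideWidth, strideHeight, activation, layout}
//   9 inputs, implicit padding: {input, filter, bias, outputShape, paddingScheme,
//       strideWidth, strideHeight, activation, layout}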
bool validate(const IOperationValidationContext* context) {
    NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
    auto inputCount = context->getNumInputs();
    auto inputType = context->getInputType(kInputTensor);
    auto filterType = context->getInputType(kFilterTensor);
    std::vector<OperandType> inExpectedTypes;
    if (inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_FLOAT16) {
        inExpectedTypes = {inputType, inputType, inputType};
    } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
        NN_RET_CHECK(filterType == OperandType::TENSOR_QUANT8_ASYMM ||
                     filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)
                << "Unsupported filter tensor type for operation " << kOperationName;
        if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
            NN_RET_CHECK_EQ(context->getInputExtraParams(kFilterTensor).channelQuant().channelDim,
                            0)
                    << "Unsupported filter tensor channel dimension for operation "
                    << kOperationName;
        }
        inExpectedTypes = {inputType, filterType, OperandType::TENSOR_INT32};
    } else {
        NN_RET_CHECK_FAIL() << "Unsupported input tensor type for operation " << kOperationName;
    }
    std::vector<OperandType> argExpectedTypes;
    if (inputCount == 11) {
        argExpectedTypes = {OperandType::INT32, OperandType::INT32, OperandType::INT32,
                            OperandType::INT32, OperandType::INT32, OperandType::INT32,
                            OperandType::INT32, OperandType::BOOL};
    } else {
        argExpectedTypes = {OperandType::TENSOR_INT32, OperandType::INT32, OperandType::INT32,
                            OperandType::INT32,        OperandType::INT32, OperandType::BOOL};
    }
    inExpectedTypes.insert(inExpectedTypes.end(), argExpectedTypes.begin(), argExpectedTypes.end());
    NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2));
    return validateInputTypes(context, inExpectedTypes) &&
           validateOutputTypes(context, {inputType});
}
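
// Shape inference example for prepare() (illustrative numbers): an NHWC input of
// 1x2x2xCin with a Coutx3x3xCin filter, stride 2, and zero explicit padding yields
// outHeight = outWidth = (2 - 1) * 2 + 3 = 5, hence an output of shape 1x5x5xCout.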
bool prepare(IOperationExecutionContext* context) {
    Shape input = context->getInputShape(kInputTensor);
    Shape filter = context->getInputShape(kFilterTensor);
    Shape bias = context->getInputShape(kBiasTensor);
    if (filter.type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
        NN_RET_CHECK(input.type == OperandType::TENSOR_QUANT8_ASYMM);
    } else {
        NN_RET_CHECK(input.type == filter.type);
    }
    if (input.type == OperandType::TENSOR_QUANT8_ASYMM) {
        NN_RET_CHECK(bias.type == OperandType::TENSOR_INT32);
    } else {
        NN_RET_CHECK(input.type == bias.type);
    }
    NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4);
    NN_RET_CHECK_EQ(getNumberOfDimensions(filter), 4);
    NN_RET_CHECK_EQ(getNumberOfDimensions(bias), 1);

    TransposeConv2dParam param;
    NN_RET_CHECK(param.initialize(context));

    uint32_t batches = getSizeOfDimension(input, 0);
    uint32_t height = getSizeOfDimension(input, param.useNchw ? 2 : 1);
    uint32_t width = getSizeOfDimension(input, param.useNchw ? 3 : 2);
    uint32_t channels_in = getSizeOfDimension(input, param.useNchw ? 1 : 3);
    uint32_t channels_out = getSizeOfDimension(filter, 0);
    uint32_t filterHeight = getSizeOfDimension(filter, 1);
    uint32_t filterWidth = getSizeOfDimension(filter, 2);
    // Only batches can be zero.
    NN_RET_CHECK_EQ(channels_in, getSizeOfDimension(filter, 3));
    NN_RET_CHECK_EQ(channels_out, getSizeOfDimension(bias, 0));
    NN_RET_CHECK_GT(height, 0);
    NN_RET_CHECK_GT(width, 0);
    NN_RET_CHECK_GT(channels_in, 0);
    NN_RET_CHECK_GT(channels_out, 0);
    NN_RET_CHECK_GT(filterWidth, 0);
    NN_RET_CHECK_GT(filterHeight, 0);

    uint32_t outWidth = computeOutSizeTransposeConv(width, filterWidth, param.strideWidth,
                                                    param.paddingLeft, param.paddingRight);
    uint32_t outHeight = computeOutSizeTransposeConv(height, filterHeight, param.strideHeight,
                                                     param.paddingTop, param.paddingBottom);
    NN_RET_CHECK_GT(outWidth, 0);
    NN_RET_CHECK_GT(outHeight, 0);

    Shape output = context->getOutputShape(kOutputTensor);
    output.type = input.type;
    if (param.useNchw) {
        output.dimensions = {batches, channels_out, outHeight, outWidth};
    } else {
        output.dimensions = {batches, outHeight, outWidth, channels_out};
    }
    return context->setOutputShape(kOutputTensor, output);
}

bool execute(IOperationExecutionContext* context) {
    // Bypass execution in the case of zero-sized input.
    if (getNumberOfElements(context->getOutputShape(kOutputTensor)) == 0) return true;
    TransposeConv2dParam param;
    NN_RET_CHECK(param.initialize(context));
    switch (context->getInputType(kInputTensor)) {
        case OperandType::TENSOR_FLOAT32:
            return transposeConv(context->getInputBuffer<float>(kInputTensor),
                                 context->getInputShape(kInputTensor),
                                 context->getInputBuffer<float>(kFilterTensor),
                                 context->getInputShape(kFilterTensor),
                                 context->getInputBuffer<float>(kBiasTensor),
                                 context->getInputShape(kBiasTensor), param,
                                 context->getOutputBuffer<float>(kOutputTensor),
                                 context->getOutputShape(kOutputTensor));
        case OperandType::TENSOR_FLOAT16:
            return transposeConv(context->getInputBuffer<_Float16>(kInputTensor),
                                 context->getInputShape(kInputTensor),
                                 context->getInputBuffer<_Float16>(kFilterTensor),
                                 context->getInputShape(kFilterTensor),
                                 context->getInputBuffer<_Float16>(kBiasTensor),
                                 context->getInputShape(kBiasTensor), param,
                                 context->getOutputBuffer<_Float16>(kOutputTensor),
                                 context->getOutputShape(kOutputTensor));
        case OperandType::TENSOR_QUANT8_ASYMM:
            if (context->getInputType(kFilterTensor) ==
                OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
                return transposeConvQuant8PerChannel(
                        context->getInputBuffer<uint8_t>(kInputTensor),
                        context->getInputShape(kInputTensor),
                        context->getInputBuffer<int8_t>(kFilterTensor),
                        context->getInputShape(kFilterTensor),
                        context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
                        context->getInputBuffer<int32_t>(kBiasTensor),
                        context->getInputShape(kBiasTensor), param,
                        context->getOutputBuffer<uint8_t>(kOutputTensor),
                        context->getOutputShape(kOutputTensor));
            } else if (context->getInputType(kFilterTensor) == OperandType::TENSOR_QUANT8_ASYMM) {
                return transposeConv(context->getInputBuffer<uint8_t>(kInputTensor),
                                     context->getInputShape(kInputTensor),
                                     context->getInputBuffer<uint8_t>(kFilterTensor),
                                     context->getInputShape(kFilterTensor),
                                     context->getInputBuffer<int32_t>(kBiasTensor),
                                     context->getInputShape(kBiasTensor), param,
                                     context->getOutputBuffer<uint8_t>(kOutputTensor),
                                     context->getOutputShape(kOutputTensor));
            } else {
                NN_RET_CHECK_FAIL() << "Unsupported filter type for operation " << kOperationName;
            }
        default:
            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
    }
}

}  // namespace transpose_conv_2d

NN_REGISTER_OPERATION(TRANSPOSE_CONV_2D, transpose_conv_2d::kOperationName,
                      transpose_conv_2d::validate, transpose_conv_2d::prepare,
                      transpose_conv_2d::execute, .allowZeroSizedInput = true);

}  // namespace nn
}  // namespace android