// CpuOperationUtils.h
/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef ANDROID_ML_NN_COMMON_CPU_OPERATION_UTILS_H
#define ANDROID_ML_NN_COMMON_CPU_OPERATION_UTILS_H

#include <algorithm>
#include <cmath>
#include <limits>
#include <vector>

#include "OperationsUtils.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace android {
namespace nn {
  25. // The implementations in tflite/kernels/internal/ take a Dims<4> object
  26. // even if the original tensors were not 4D.
  27. inline tflite::Dims<4> convertShapeToDims(const Shape& shape) {
  28. nnAssert(shape.dimensions.size() <= 4);
  29. tflite::Dims<4> dims;
  30. // The dimensions are reversed in Dims<4>.
  31. for (int i = 0; i < 4; ++i) {
  32. int src = static_cast<int>(shape.dimensions.size()) - i - 1;
  33. if (src >= 0) {
  34. dims.sizes[i] = static_cast<int>(getSizeOfDimension(shape, src));
  35. } else {
  36. dims.sizes[i] = 1;
  37. }
  38. }
  39. dims.strides[0] = 1;
  40. for (int i = 1; i < 4; i++) {
  41. dims.strides[i] = dims.strides[i - 1] * dims.sizes[i - 1];
  42. }
  43. return dims;
  44. }
  45. inline tflite::RuntimeShape convertShapeToTflshape(const Shape& shape) {
  46. nnAssert(shape.dimensions.size() <= 4);
  47. std::vector<int32_t> tflShapeDim(shape.dimensions.begin(), shape.dimensions.end());
  48. return tflite::RuntimeShape(tflShapeDim.size(), tflShapeDim.data());
  49. }
  50. inline void convertFloat16ToFloat32(const _Float16* input, std::vector<float>* output) {
  51. CHECK(input != nullptr);
  52. CHECK(output != nullptr);
  53. for (int i = 0; i < output->size(); ++i) {
  54. (*output)[i] = static_cast<float>(input[i]);
  55. }
  56. }
  57. inline void convertFloat32ToFloat16(const std::vector<float>& input, _Float16* output) {
  58. CHECK(output != nullptr);
  59. for (int i = 0; i < input.size(); ++i) {
  60. output[i] = input[i];
  61. }
  62. }
  63. template <typename T>
  64. inline void convertQuantToFloat32(const T* input, float scale, int32_t zeroPoint,
  65. std::vector<float>* output) {
  66. CHECK(input != nullptr);
  67. CHECK(output != nullptr);
  68. for (int i = 0; i < output->size(); ++i) {
  69. (*output)[i] = (static_cast<float>(input[i]) - zeroPoint) * scale;
  70. }
  71. }
  72. template <typename T>
  73. inline void convertFloat32ToQuant(const std::vector<float>& input, float scale, int32_t zeroPoint,
  74. T* output) {
  75. CHECK(output != nullptr);
  76. for (int i = 0; i < input.size(); ++i) {
  77. int32_t intVal = std::round(input[i] / scale + zeroPoint);
  78. intVal = std::min<int32_t>(std::max<int32_t>(intVal, std::numeric_limits<T>::min()),
  79. std::numeric_limits<T>::max());
  80. output[i] = static_cast<T>(intVal);
  81. }
  82. }
  83. template <typename T>
  84. inline bool convertNchwToNhwc(const T* nchw, const Shape& nchwShape, std::vector<T>* nhwc,
  85. Shape* nhwcShape) {
  86. NN_RET_CHECK_EQ(getNumberOfDimensions(nchwShape), 4)
  87. << "Error converting a non-4-D tensor to NHWC layout";
  88. *nhwcShape = nchwShape;
  89. const auto& fromDim = nchwShape.dimensions;
  90. nhwcShape->dimensions = {fromDim[0], fromDim[2], fromDim[3], fromDim[1]};
  91. nhwc->resize(getNumberOfElements(nchwShape));
  92. auto to = nhwc->data();
  93. uint32_t spatialSize = fromDim[2] * fromDim[3];
  94. for (uint32_t n = 0; n < fromDim[0]; n++) {
  95. for (uint32_t hw = 0; hw < spatialSize; hw++) {
  96. for (uint32_t c = 0; c < fromDim[1]; c++) {
  97. uint32_t fromIndex = n * fromDim[1] * spatialSize + c * spatialSize + hw;
  98. *to++ = nchw[fromIndex];
  99. }
  100. }
  101. }
  102. return true;
  103. }
  104. template <typename T>
  105. inline bool convertNhwcToNchw(const std::vector<T>& nhwc, const Shape& nhwcShape, T* nchw) {
  106. NN_RET_CHECK_EQ(getNumberOfDimensions(nhwcShape), 4)
  107. << "Error converting a non-4-D tensor to NCHW layout";
  108. const auto& fromDim = nhwcShape.dimensions;
  109. const auto from = nhwc.data();
  110. uint32_t spatialSize = fromDim[1] * fromDim[2];
  111. for (uint32_t n = 0; n < fromDim[0]; n++) {
  112. for (uint32_t c = 0; c < fromDim[3]; c++) {
  113. for (uint32_t hw = 0; hw < spatialSize; hw++) {
  114. uint32_t fromIndex = n * spatialSize * fromDim[3] + hw * fromDim[3] + c;
  115. *nchw++ = from[fromIndex];
  116. }
  117. }
  118. }
  119. return true;
  120. }
  121. template <typename T>
  122. class InputWithLayout {
  123. public:
  124. InputWithLayout(bool useNchw) : mDataOriginal(nullptr), mUseNchw(useNchw) {}
  125. bool initialize(const T* data, const Shape& shape) {
  126. mDataOriginal = data;
  127. mShape = shape;
  128. if (mUseNchw) {
  129. return convertNchwToNhwc(mDataOriginal, shape, &mDataNhwc, &mShape);
  130. }
  131. return true;
  132. }
  133. const T* getNhwcBuffer() { return mUseNchw ? mDataNhwc.data() : mDataOriginal; }
  134. const Shape& getNhwcShape() { return mShape; }
  135. private:
  136. const T* mDataOriginal;
  137. std::vector<T> mDataNhwc;
  138. Shape mShape;
  139. bool mUseNchw;
  140. };
  141. template <typename T>
  142. class OutputWithLayout {
  143. public:
  144. OutputWithLayout(bool useNchw) : mDataOriginal(nullptr), mUseNchw(useNchw) {}
  145. bool initialize(T* data, const Shape& shape) {
  146. NN_RET_CHECK_EQ(getNumberOfDimensions(shape), 4);
  147. mDataOriginal = data;
  148. mShape = shape;
  149. if (mUseNchw) {
  150. const auto& dim = shape.dimensions;
  151. mShape.dimensions = {dim[0], dim[2], dim[3], dim[1]};
  152. mDataNhwc.resize(getNumberOfElements(shape));
  153. }
  154. return true;
  155. }
  156. T* getNhwcBuffer() { return mUseNchw ? mDataNhwc.data() : mDataOriginal; }
  157. const Shape& getNhwcShape() { return mShape; }
  158. bool commit() {
  159. if (mUseNchw) {
  160. return convertNhwcToNchw(mDataNhwc, mShape, mDataOriginal);
  161. }
  162. return true;
  163. }
  164. private:
  165. T* mDataOriginal;
  166. std::vector<T> mDataNhwc;
  167. Shape mShape;
  168. bool mUseNchw;
  169. };
}  // namespace nn
}  // namespace android

#endif  // ANDROID_ML_NN_COMMON_CPU_OPERATION_UTILS_H