// Utils.h
  1. /*
  2. * Copyright (C) 2017 The Android Open Source Project
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef ANDROID_ML_NN_COMMON_UTILS_H
  17. #define ANDROID_ML_NN_COMMON_UTILS_H
  18. #include "HalInterfaces.h"
  19. #include "NeuralNetworks.h"
  20. #include "ValidateHal.h"
  21. #include <android-base/logging.h>
  22. #include <optional>
  23. #include <set>
  24. #include <vector>
  25. namespace android {
  26. namespace nn {
  27. // The number of data types (OperandCode) defined in NeuralNetworks.h.
  28. const int kNumberOfDataTypes = 14;
  29. // The number of operation types (OperationCode) defined in NeuralNetworks.h.
  30. const int kNumberOfOperationTypes = 95;
  31. // The number of execution preferences defined in NeuralNetworks.h.
  32. const int kNumberOfPreferences = 3;
  33. // The number of data types (OperandCode) defined in NeuralNetworksOEM.h.
  34. const int kNumberOfDataTypesOEM = 2;
  35. // The number of operation types (OperationCode) defined in NeuralNetworksOEM.h.
  36. const int kNumberOfOperationTypesOEM = 1;
  37. // The lowest number assigned to any OEM Code in NeuralNetworksOEM.h.
  38. const int kOEMCodeBase = 10000;
  39. /* IMPORTANT: if you change the following list, don't
  40. * forget to update the corresponding 'tags' table in
  41. * the initVlogMask() function implemented in Utils.cpp.
  42. */
  43. enum VLogFlags {
  44. MODEL = 0,
  45. COMPILATION,
  46. EXECUTION,
  47. CPUEXE,
  48. MANAGER,
  49. DRIVER
  50. };
  51. #define VLOG_IS_ON(TAG) \
  52. ((vLogMask & (1 << (TAG))) != 0)
  53. #define VLOG(TAG) \
  54. if (LIKELY(!VLOG_IS_ON(TAG))) \
  55. ; \
  56. else \
  57. LOG(INFO)
  58. extern int vLogMask;
  59. void initVLogMask();
  60. #ifdef NN_DEBUGGABLE
  61. #define SHOW_IF_DEBUG(msg) msg
  62. #else
  63. #define SHOW_IF_DEBUG(msg) ""
  64. #endif
  65. // DEPRECATED(b/118737105). Use CHECK.
  66. #define nnAssert(v) CHECK(v)
  67. #define NN_RETURN_IF_ERROR(expr) \
  68. do { \
  69. int _errorCode = (expr); \
  70. if (_errorCode != ANEURALNETWORKS_NO_ERROR) { \
  71. return _errorCode; \
  72. } \
  73. } while (0)
  74. // The NN_RET_CHECK family of macros defined below is similar to the CHECK family defined in
  75. // system/core/base/include/android-base/logging.h
  76. //
  77. // The difference is that NN_RET_CHECK macros use LOG(ERROR) instead of LOG(FATAL)
  78. // and return false instead of aborting.
  79. // Logs an error and returns false. Append context using << after. For example:
  80. //
  81. // NN_RET_CHECK_FAIL() << "Something went wrong";
  82. //
  83. // The containing function must return a bool.
  84. #define NN_RET_CHECK_FAIL() \
  85. return ::android::nn::FalseyErrorStream() \
  86. << "NN_RET_CHECK failed (" << __FILE__ << ":" << __LINE__ << "): "
  87. // Logs an error and returns false if condition is false. Extra logging can be appended using <<
  88. // after. For example:
  89. //
  90. // NN_RET_CHECK(false) << "Something went wrong";
  91. //
  92. // The containing function must return a bool.
  93. #define NN_RET_CHECK(condition) \
  94. while (UNLIKELY(!(condition))) NN_RET_CHECK_FAIL() << #condition << " "
  95. // Helper for NN_CHECK_xx(x, y) macros.
  96. #define NN_RET_CHECK_OP(LHS, RHS, OP) \
  97. for (auto _values = ::android::base::MakeEagerEvaluator(LHS, RHS); \
  98. UNLIKELY(!(_values.lhs OP _values.rhs)); \
  99. /* empty */) \
  100. NN_RET_CHECK_FAIL() << #LHS << " " << #OP << " " << #RHS << " (" << #LHS << " = " \
  101. << _values.lhs << ", " << #RHS << " = " << _values.rhs << ") "
  102. // Logs an error and returns false if a condition between x and y does not hold. Extra logging can
  103. // be appended using << after. For example:
  104. //
  105. // NN_RET_CHECK_EQ(a, b) << "Something went wrong";
  106. //
  107. // The values must implement the appropriate comparison operator as well as
  108. // `operator<<(std::ostream&, ...)`.
  109. // The containing function must return a bool.
  110. #define NN_RET_CHECK_EQ(x, y) NN_RET_CHECK_OP(x, y, ==)
  111. #define NN_RET_CHECK_NE(x, y) NN_RET_CHECK_OP(x, y, !=)
  112. #define NN_RET_CHECK_LE(x, y) NN_RET_CHECK_OP(x, y, <=)
  113. #define NN_RET_CHECK_LT(x, y) NN_RET_CHECK_OP(x, y, <)
  114. #define NN_RET_CHECK_GE(x, y) NN_RET_CHECK_OP(x, y, >=)
  115. #define NN_RET_CHECK_GT(x, y) NN_RET_CHECK_OP(x, y, >)
  116. // A wrapper around LOG(ERROR) that can be implicitly converted to bool (always evaluates to false).
  117. // Used to implement stream logging in NN_RET_CHECK.
  118. class FalseyErrorStream {
  119. DISALLOW_COPY_AND_ASSIGN(FalseyErrorStream);
  120. public:
  121. FalseyErrorStream() {}
  122. template <typename T>
  123. FalseyErrorStream& operator<<(const T& value) {
  124. mBuffer << value;
  125. return *this;
  126. }
  127. ~FalseyErrorStream() { LOG(ERROR) << mBuffer.str(); }
  128. operator bool() const { return false; }
  129. private:
  130. std::ostringstream mBuffer;
  131. };
  132. // Return a vector with one entry for each non extension OperandType, set to the
  133. // specified PerformanceInfo value. The vector will be sorted by OperandType.
  134. hidl_vec<Capabilities::OperandPerformance> nonExtensionOperandPerformance(PerformanceInfo perf);
  135. // Update the vector entry corresponding to the specified OperandType with the
  136. // specified PerformanceInfo value. The vector must already have an entry for
  137. // that OperandType, and must be sorted by OperandType.
  138. void update(hidl_vec<Capabilities::OperandPerformance>* operandPerformance, OperandType type,
  139. PerformanceInfo perf);
  140. // Look for a vector entry corresponding to the specified OperandType. If
  141. // found, return the associated PerformanceInfo. If not, return a pessimistic
  142. // PerformanceInfo (FLT_MAX). The vector must be sorted by OperandType.
  143. PerformanceInfo lookup(const hidl_vec<Capabilities::OperandPerformance>& operandPerformance,
  144. OperandType type);
  145. // Returns true if an operand type is an extension type.
  146. bool isExtensionOperandType(OperandType type);
  147. // Returns true if an operation type is an extension type.
  148. bool isExtensionOperationType(OperationType type);
  149. // Returns the amount of space needed to store a value of the specified
  150. // dimensions and type. For a tensor with unspecified rank or at least one
  151. // unspecified dimension, returns zero.
  152. //
  153. // Aborts if the specified type is an extension type.
  154. //
  155. // See also TypeManager::getSizeOfData(OperandType, const std::vector<uint32_t>&).
  156. uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions);
  157. // Returns the amount of space needed to store a value of the dimensions and
  158. // type of this operand. For a tensor with unspecified rank or at least one
  159. // unspecified dimension, returns zero.
  160. //
  161. // Aborts if the specified type is an extension type.
  162. //
  163. // See also TypeManager::getSizeOfData(const Operand&).
inline uint32_t nonExtensionOperandSizeOfData(const Operand& operand) {
    // Convenience wrapper: delegates to the (type, dimensions) overload
    // declared above using the operand's own type and dimensions.
    return nonExtensionOperandSizeOfData(operand.type, operand.dimensions);
}
  167. // Returns true if a non-extension operand type is a scalar type.
  168. //
  169. // Aborts if the specified type is an extension type.
  170. //
  171. // See also TypeManager::isTensorType(OperandType).
  172. bool nonExtensionOperandTypeIsScalar(int type);
  173. // Returns the name of the operation type in ASCII.
  174. std::string getOperationName(OperationType opCode);
  175. // Returns the name of the operand type in ASCII.
  176. std::string getOperandTypeName(OperandType type);
  177. // Whether an operand of tensor type has unspecified dimensions.
  178. //
  179. // Undefined behavior if the operand type is a scalar type.
  180. bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount);
  181. bool tensorHasUnspecifiedDimensions(const Operand& operand);
  182. bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type);
  183. // Memory is unmapped.
  184. // Memory is reference counted by hidl_memory instances, and is deallocated
  185. // once there are no more references.
  186. hidl_memory allocateSharedMemory(int64_t size);
  187. // Returns the number of padding bytes needed to align data of the
  188. // specified length. It aligns object of length:
  189. // 2, 3 on a 2 byte boundary,
  190. // 4+ on a 4 byte boundary.
  191. // We may want to have different alignments for tensors.
  192. // TODO: This is arbitrary, more a proof of concept. We need
  193. // to determine what this should be.
  194. uint32_t alignBytesNeeded(uint32_t index, size_t length);
  195. // Does a detailed LOG(INFO) of the model
  196. void logModelToInfo(const V1_0::Model& model);
  197. void logModelToInfo(const V1_1::Model& model);
  198. void logModelToInfo(const V1_2::Model& model);
  199. inline std::string toString(uint32_t obj) {
  200. return std::to_string(obj);
  201. }
  202. template <typename Type>
  203. std::string toString(const std::vector<Type>& range) {
  204. std::string os = "[";
  205. for (size_t i = 0; i < range.size(); ++i) {
  206. os += (i == 0 ? "" : ", ") + toString(range[i]);
  207. }
  208. return os += "]";
  209. }
  210. inline std::string toString(HalVersion halVersion) {
  211. switch (halVersion) {
  212. case HalVersion::UNKNOWN:
  213. return "UNKNOWN HAL version";
  214. case HalVersion::V1_0:
  215. return "HAL version 1.0";
  216. case HalVersion::V1_1:
  217. return "HAL version 1.1";
  218. case HalVersion::V1_2:
  219. return "HAL version 1.2";
  220. }
  221. }
  222. inline bool validCode(uint32_t codeCount, uint32_t codeCountOEM, uint32_t code) {
  223. return (code < codeCount) || (code >= kOEMCodeBase && (code - kOEMCodeBase) < codeCountOEM);
  224. }
  225. bool validateOperandSymmPerChannelQuantParams(
  226. const Operand& halOperand, const ANeuralNetworksSymmPerChannelQuantParams& channelQuant,
  227. const char* tag);
  228. // Validates an operand type.
  229. //
  230. // extensionOperandTypeInfo must be nullptr iff the type is not an extension type.
  231. //
  232. // If allowPartial is true, the dimensions may be underspecified.
  233. int validateOperandType(const ANeuralNetworksOperandType& type,
  234. const Extension::OperandTypeInformation* const extensionOperandTypeInfo,
  235. const char* tag, bool allowPartial);
  236. int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
  237. const char* tag);
  238. // Returns ANEURALNETWORKS_NO_ERROR if the corresponding operation is defined and can handle the
  239. // provided operand types in the given HAL version, otherwise returns ANEURALNETWORKS_BAD_DATA.
  240. int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
  241. const uint32_t* inputIndexes, uint32_t outputCount,
  242. const uint32_t* outputIndexes, const std::vector<Operand>& operands,
  243. HalVersion halVersion);
  244. inline size_t getSizeFromInts(int lower, int higher) {
  245. return (uint32_t)(lower) + ((uint64_t)(uint32_t)(higher) << 32);
  246. }
  247. // Convert ANEURALNETWORKS_* result code to ErrorStatus.
  248. // Not guaranteed to be a 1-to-1 mapping.
  249. ErrorStatus convertResultCodeToErrorStatus(int resultCode);
  250. // Convert ErrorStatus to ANEURALNETWORKS_* result code.
  251. // Not guaranteed to be a 1-to-1 mapping.
  252. int convertErrorStatusToResultCode(ErrorStatus status);
  253. // Versioning
  254. bool compliantWithV1_0(const V1_0::Capabilities& capabilities);
  255. bool compliantWithV1_0(const V1_1::Capabilities& capabilities);
  256. bool compliantWithV1_0(const V1_2::Capabilities& capabilities);
  257. bool compliantWithV1_1(const V1_0::Capabilities& capabilities);
  258. bool compliantWithV1_1(const V1_1::Capabilities& capabilities);
  259. bool compliantWithV1_1(const V1_2::Capabilities& capabilities);
  260. bool compliantWithV1_2(const V1_0::Capabilities& capabilities);
  261. bool compliantWithV1_2(const V1_1::Capabilities& capabilities);
  262. bool compliantWithV1_2(const V1_2::Capabilities& capabilities);
  263. bool compliantWithV1_0(const V1_2::Operand& operand);
  264. // If noncompliantOperations != nullptr, then
  265. // precondition: noncompliantOperations->empty()
  266. // postcondition: *noncompliantOperations consists of the indices of the noncompliant
  267. // operations; if the compliance check fails for some reason
  268. // other than a noncompliant operation,
  269. // *noncompliantOperations consists of the indices of all operations
  270. bool compliantWithV1_0(const V1_0::Model& model);
  271. bool compliantWithV1_0(const V1_1::Model& model);
  272. bool compliantWithV1_0(const V1_2::Model& model,
  273. std::set<uint32_t>* noncompliantOperations = nullptr);
  274. bool compliantWithV1_1(const V1_0::Model& model);
  275. bool compliantWithV1_1(const V1_1::Model& model);
  276. bool compliantWithV1_1(const V1_2::Model& model,
  277. std::set<uint32_t>* noncompliantOperations = nullptr);
  278. V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities);
  279. V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities);
  280. V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities);
  281. V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities);
  282. V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities);
  283. V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities);
  284. V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities);
  285. V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities);
  286. V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities);
  287. V1_0::Model convertToV1_0(const V1_0::Model& model);
  288. V1_0::Model convertToV1_0(const V1_1::Model& model);
  289. V1_0::Model convertToV1_0(const V1_2::Model& model);
  290. V1_1::Model convertToV1_1(const V1_0::Model& model);
  291. V1_1::Model convertToV1_1(const V1_1::Model& model);
  292. V1_1::Model convertToV1_1(const V1_2::Model& model);
  293. V1_2::Model convertToV1_2(const V1_0::Model& model);
  294. V1_2::Model convertToV1_2(const V1_1::Model& model);
  295. V1_2::Model convertToV1_2(const V1_2::Model& model);
  296. // The IModelSlicer abstract class provides methods to create from an original
  297. // model a "slice" of that model consisting of the subset of operations that is
  298. // compliant with a particular HAL version, and a mechanism for mapping
  299. // operations from the slice back to operations of the original model. The
  300. // slice is intended to be passed to getSupportedOperations*(), with the mapping
  301. // used to translate the results of that call from the slice's operations to the
  302. // original model's operations. The slice has no other purpose (for example, it
  303. // is not guaranteed to have the same topology as a subgraph of the original
  304. // model).
  305. //
  306. // Note that the original model is not part of the ModelSlicer specification --
  307. // an instance of a class derived from ModelSlicer is responsible for knowing
  308. // the original model. getSlice*() methods may be called multiple times on a
  309. // given instance; the intention is that the instance cache slices internally.
  310. //
  311. // The meaning of the return value of the getSlice*() methods is explained by
  312. // the following example:
  313. //
  314. // IModelSlicer* slicer = ...;
  315. // auto ret = slicer->getSliceV1_0(); // getSliceV1_1() is similar
  316. // if (ret.has_value()) {
  317. // const V1_0::Model model = ret->first; // the slice
  318. // auto mapper = ret->second;
  319. // // mapper is a functor that takes an operation index in the
  320. // // slice and returns the corresponding operation index in the
  321. // // original model. The functor must remain valid for the lifetime
  322. // // of *slicer.
  323. // } else {
  324. // // Could not obtain a slice. For example, perhaps none of the
  325. // // original model's operations are compliant with V1_0.
  326. // }
  327. //
// Abstract interface for producing HAL-version-compliant "slices" of an
// original model; see the contract and usage example in the block comment
// above. Implementations are responsible for knowing the original model and
// are expected to cache slices across repeated getSlice*() calls.
class IModelSlicer {
   public:
    // Returns the V1_0-compliant slice of the original model paired with a
    // functor that maps a slice operation index back to the corresponding
    // original-model operation index, or std::nullopt if no slice could be
    // produced. The functor must remain valid for the lifetime of *this.
    virtual std::optional<std::pair<V1_0::Model, std::function<uint32_t(uint32_t)>>>
    getSliceV1_0() = 0;
    // As getSliceV1_0(), but the returned slice is V1_1-compliant.
    virtual std::optional<std::pair<V1_1::Model, std::function<uint32_t(uint32_t)>>>
    getSliceV1_1() = 0;
    virtual ~IModelSlicer() = default;
};
  336. V1_0::OperationType uncheckedConvertToV1_0(V1_2::OperationType type);
  337. V1_1::OperationType uncheckedConvertToV1_1(V1_2::OperationType type);
  338. V1_0::Operand convertToV1_0(const V1_2::Operand& operand);
  339. V1_2::Operand convertToV1_2(const V1_0::Operand& operand);
  340. V1_2::Operand convertToV1_2(const V1_2::Operand& operand);
  341. hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_0::Operand>& operands);
  342. hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_2::Operand>& operands);
  343. #ifdef NN_DEBUGGABLE
  344. uint32_t getProp(const char* str, uint32_t defaultValue = 0);
  345. #endif // NN_DEBUGGABLE
  346. } // namespace nn
  347. } // namespace android
  348. #endif // ANDROID_ML_NN_COMMON_UTILS_H