Utils.cpp
/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Utils"

#include "Utils.h"
#include "NeuralNetworks.h"
#include "NeuralNetworksOEM.h"
#include "OperationResolver.h"
#include "ValidateHal.h"

#include <android-base/logging.h>
#include <android-base/properties.h>
#include <android-base/strings.h>
#include <sys/system_properties.h>

#include <algorithm>
#include <unordered_map>

using ::android::hidl::allocator::V1_0::IAllocator;

namespace android {
namespace nn {

const char kVLogPropKey[] = "debug.nn.vlog";
int vLogMask = ~0;

// Split the space-separated list of tags from the verbose log setting and
// build the logging mask from it. Note that '1' and 'all' are special cases
// that enable all verbose logging.
//
// The NN API verbose logging setting comes from the system property
// debug.nn.vlog. Examples:
//     setprop debug.nn.vlog 1 : enable all logging tags.
//     setprop debug.nn.vlog "model compilation" : only enable logging for the
//     MODEL and COMPILATION tags.
void initVLogMask() {
    vLogMask = 0;
    const std::string vLogSetting = android::base::GetProperty(kVLogPropKey, "");
    if (vLogSetting.empty()) {
        return;
    }

    std::unordered_map<std::string, int> vLogFlags = {{"1", -1},
                                                      {"all", -1},
                                                      {"model", MODEL},
                                                      {"compilation", COMPILATION},
                                                      {"execution", EXECUTION},
                                                      {"cpuexe", CPUEXE},
                                                      {"manager", MANAGER},
                                                      {"driver", DRIVER}};

    std::vector<std::string> elements = android::base::Split(vLogSetting, " ,:");
    for (const auto& elem : elements) {
        const auto& flag = vLogFlags.find(elem);
        if (flag == vLogFlags.end()) {
            LOG(ERROR) << "Unknown trace flag: " << elem;
            continue;
        }
        if (flag->second == -1) {
            // -1 is used for the special values "1" and "all" that enable all tracing.
            vLogMask = ~0;
            return;
        } else {
            vLogMask |= 1 << flag->second;
        }
    }
}
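
// Operand and operation type values above the base range (OperandTypeRange /
// OperationTypeRange BASE_MAX) belong to vendor extensions; everything at or
// below it is a type defined by the NN API itself.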
static bool isExtensionOperandType(int32_t type) {
    return static_cast<uint32_t>(type) > static_cast<uint32_t>(OperandTypeRange::BASE_MAX);
}

static bool isExtensionOperationType(ANeuralNetworksOperationType type) {
    return static_cast<uint32_t>(type) > static_cast<uint32_t>(OperationTypeRange::BASE_MAX);
}

bool isExtensionOperandType(OperandType type) {
    return isExtensionOperandType(static_cast<int32_t>(type));
}

bool isExtensionOperationType(OperationType type) {
    return isExtensionOperationType(static_cast<int32_t>(type));
}

namespace {
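
// Looks up an entry in one of the paired type tables. Codes in [0, entryCount)
// index the base table; codes in [kOEMCodeBase, kOEMCodeBase + entryCountOEM)
// index the OEM table. Any other code is a programming error and asserts.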
template <typename EntryType, uint32_t entryCount, uint32_t entryCountOEM>
EntryType tableLookup(const EntryType (&table)[entryCount],
                      const EntryType (&tableOEM)[entryCountOEM], uint32_t code) {
    if (code < entryCount) {
        return table[code];
    } else if (code >= kOEMCodeBase && (code - kOEMCodeBase) < entryCountOEM) {
        return tableOEM[code - kOEMCodeBase];
    } else {
        nnAssert(!"tableLookup: bad code");
        return EntryType();
    }
}
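
// Implementation of IOperationValidationContext that exposes a single
// operation's input and output operands (types, shapes, extra params) to the
// per-operation validation code, without exposing the rest of the model.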
class OperationValidationContext : public IOperationValidationContext {
    DISALLOW_IMPLICIT_CONSTRUCTORS(OperationValidationContext);

   public:
    OperationValidationContext(uint32_t inputCount, const uint32_t* inputIndexes,
                               uint32_t outputCount, const uint32_t* outputIndexes,
                               const Operand* operands, HalVersion halVersion)
        : inputCount(inputCount),
          inputIndexes(inputIndexes),
          outputCount(outputCount),
          outputIndexes(outputIndexes),
          operands(operands),
          halVersion(halVersion) {}

    HalVersion getHalVersion() const override;

    uint32_t getNumInputs() const override;
    OperandType getInputType(uint32_t index) const override;
    Shape getInputShape(uint32_t index) const override;
    const Operand::ExtraParams getInputExtraParams(uint32_t index) const override;

    uint32_t getNumOutputs() const override;
    OperandType getOutputType(uint32_t index) const override;
    Shape getOutputShape(uint32_t index) const override;

   private:
    const Operand* getInputOperand(uint32_t index) const;
    const Operand* getOutputOperand(uint32_t index) const;

    uint32_t inputCount;
    const uint32_t* inputIndexes;
    uint32_t outputCount;
    const uint32_t* outputIndexes;
    const Operand* operands;
    HalVersion halVersion;
};

HalVersion OperationValidationContext::getHalVersion() const {
    return halVersion;
}

const Operand* OperationValidationContext::getInputOperand(uint32_t index) const {
    CHECK(index < static_cast<uint32_t>(inputCount));
    return &operands[inputIndexes[index]];
}

const Operand* OperationValidationContext::getOutputOperand(uint32_t index) const {
    CHECK(index < static_cast<uint32_t>(outputCount));
    return &operands[outputIndexes[index]];
}

uint32_t OperationValidationContext::getNumInputs() const {
    return inputCount;
}

uint32_t OperationValidationContext::getNumOutputs() const {
    return outputCount;
}

OperandType OperationValidationContext::getInputType(uint32_t index) const {
    return getInputOperand(index)->type;
}

Shape OperationValidationContext::getInputShape(uint32_t index) const {
    const Operand* operand = getInputOperand(index);
    return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint,
            operand->extraParams};
}

const Operand::ExtraParams OperationValidationContext::getInputExtraParams(uint32_t index) const {
    return getInputOperand(index)->extraParams;
}

OperandType OperationValidationContext::getOutputType(uint32_t index) const {
    return getOutputOperand(index)->type;
}

Shape OperationValidationContext::getOutputShape(uint32_t index) const {
    const Operand* operand = getOutputOperand(index);
    return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint,
            operand->extraParams};
}

}  // anonymous namespace

#define COUNT(X) (sizeof(X) / sizeof(X[0]))

std::string getOperandTypeName(OperandType type) {
    return toString(type);
}

static std::string getOperationName(uint32_t code) {
    return getOperationName(static_cast<OperationType>(code));
}

std::string getOperationName(OperationType type) {
    return toString(type);
}
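
// The tables below are indexed by the numeric value of an OperandType (or, for
// the OEM tables, by its offset from kOEMCodeBase) and must stay in sync with
// the type definitions in NeuralNetworks.h; the static_asserts guard the entry
// counts.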
const uint32_t kSizeOfDataType[]{
        4,  // ANEURALNETWORKS_FLOAT32
        4,  // ANEURALNETWORKS_INT32
        4,  // ANEURALNETWORKS_UINT32
        4,  // ANEURALNETWORKS_TENSOR_FLOAT32
        4,  // ANEURALNETWORKS_TENSOR_INT32
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
        1,  // ANEURALNETWORKS_BOOL
        2,  // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
        2,  // ANEURALNETWORKS_TENSOR_FLOAT16
        1,  // ANEURALNETWORKS_TENSOR_BOOL8
        2,  // ANEURALNETWORKS_FLOAT16
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
        2,  // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
};

static_assert(COUNT(kSizeOfDataType) == kNumberOfDataTypes, "kSizeOfDataType is incorrect");

const bool kScalarDataType[]{
        true,   // ANEURALNETWORKS_FLOAT32
        true,   // ANEURALNETWORKS_INT32
        true,   // ANEURALNETWORKS_UINT32
        false,  // ANEURALNETWORKS_TENSOR_FLOAT32
        false,  // ANEURALNETWORKS_TENSOR_INT32
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
        true,   // ANEURALNETWORKS_BOOL
        false,  // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
        false,  // ANEURALNETWORKS_TENSOR_FLOAT16
        false,  // ANEURALNETWORKS_TENSOR_BOOL8
        true,   // ANEURALNETWORKS_FLOAT16
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
        false,  // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
};

static_assert(COUNT(kScalarDataType) == kNumberOfDataTypes, "kScalarDataType is incorrect");

const uint32_t kSizeOfDataTypeOEM[]{
        0,  // ANEURALNETWORKS_OEM
        1,  // ANEURALNETWORKS_TENSOR_OEM_BYTE
};

static_assert(COUNT(kSizeOfDataTypeOEM) == kNumberOfDataTypesOEM,
              "kSizeOfDataTypeOEM is incorrect");

const bool kScalarDataTypeOEM[]{
        true,   // ANEURALNETWORKS_OEM
        false,  // ANEURALNETWORKS_TENSOR_OEM_BYTE
};

static_assert(COUNT(kScalarDataTypeOEM) == kNumberOfDataTypesOEM,
              "kScalarDataTypeOEM is incorrect");

bool nonExtensionOperandTypeIsScalar(int type) {
    CHECK(!isExtensionOperandType(type)) << "Extension operand types are not supported";
    return tableLookup(kScalarDataType, kScalarDataTypeOEM, type);
}
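
// Returns the size in bytes needed to store an operand of the given
// non-extension type and dimensions: the element size for scalars, the element
// size times the product of the dimensions for tensors, and 0 when the tensor's
// dimensions are not fully specified.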
uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) {
    CHECK(!isExtensionOperandType(type)) << "Size of extension operand data is unknown";
    int n = static_cast<int>(type);

    uint32_t size = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n);

    if (tableLookup(kScalarDataType, kScalarDataTypeOEM, n) == true) {
        return size;
    }

    if (dimensions.empty()) {
        return 0;
    }

    for (auto d : dimensions) {
        size *= d;
    }

    return size;
}
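
// A tensor's dimensions are considered unspecified if its rank is unknown
// (dimCount == 0) or if any individual dimension is 0.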
bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount) {
    if (!isExtensionOperandType(type)) {
        CHECK(!nonExtensionOperandTypeIsScalar(type))
                << "A scalar type can never have unspecified dimensions";
    }
    return dimCount == 0 || std::find(dim, dim + dimCount, 0) != (dim + dimCount);
}

bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type) {
    return tensorHasUnspecifiedDimensions(type->type, type->dimensions, type->dimensionCount);
}

bool tensorHasUnspecifiedDimensions(const Operand& operand) {
    return tensorHasUnspecifiedDimensions(static_cast<int>(operand.type), operand.dimensions.data(),
                                          operand.dimensions.size());
}
hidl_memory allocateSharedMemory(int64_t size) {
    static const std::string type = "ashmem";
    static sp<IAllocator> allocator = IAllocator::getService(type);

    hidl_memory memory;

    // TODO: should we align memory size to nearest page? doesn't seem necessary...
    allocator->allocate(size, [&](bool success, const hidl_memory& mem) {
        if (!success) {
            LOG(ERROR) << "unable to allocate " << size << " bytes of " << type;
        } else {
            memory = mem;
        }
    });

    return memory;
}
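
// Returns how many padding bytes are needed before a value of the given length
// that would otherwise start at offset `index`, so that it starts on a 1-, 2-,
// or 4-byte boundary. (~(index - 1)) & pattern equals (-index) & pattern in
// two's complement, i.e. the distance from index up to the next multiple of
// pattern + 1. For example, a 4-byte value at index 6 needs (~5) & 3 = 2
// padding bytes so that it starts at offset 8.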
uint32_t alignBytesNeeded(uint32_t index, size_t length) {
    uint32_t pattern;
    if (length < 2) {
        pattern = 0;  // No alignment necessary
    } else if (length < 4) {
        pattern = 1;  // Align on 2-byte boundary
    } else {
        pattern = 3;  // Align on 4-byte boundary
    }
    uint32_t extra = (~(index - 1)) & pattern;
    return extra;
}

void logModelToInfo(const V1_0::Model& model) {
    LOG(INFO) << "V1_0::Model start";
    LOG(INFO) << "operands" << toString(model.operands);
    LOG(INFO) << "operations" << toString(model.operations);
    LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
    LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
    LOG(INFO) << "operandValues size" << model.operandValues.size();
    LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
}

void logModelToInfo(const V1_1::Model& model) {
    LOG(INFO) << "V1_1::Model start";
    LOG(INFO) << "operands" << toString(model.operands);
    LOG(INFO) << "operations" << toString(model.operations);
    LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
    LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
    LOG(INFO) << "operandValues size" << model.operandValues.size();
    LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
}
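
// Checks that per-channel quantization parameters are consistent with the
// operand they describe: the operand must be TENSOR_QUANT8_SYMM_PER_CHANNEL,
// the channel dimension must be in range and fully specified, and there must
// be one positive scale per channel.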
bool validateOperandSymmPerChannelQuantParams(
        const Operand& halOperand, const ANeuralNetworksSymmPerChannelQuantParams& channelQuant,
        const char* tag) {
    if (halOperand.type != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
        return false;
    }

    NN_RET_CHECK_LT(channelQuant.channelDim, halOperand.dimensions.size()) << tag;
    NN_RET_CHECK(channelQuant.scales != nullptr) << tag;
    NN_RET_CHECK_EQ(channelQuant.scaleCount, halOperand.dimensions[channelQuant.channelDim]) << tag;
    NN_RET_CHECK_NE(halOperand.dimensions[channelQuant.channelDim], 0u)
            << tag << " channel dimension " << channelQuant.channelDim << " is underspecified";
    for (uint32_t i = 0; i < halOperand.dimensions[channelQuant.channelDim]; i++) {
        NN_RET_CHECK_GT(channelQuant.scales[i], 0.0f) << tag << " invalid scaleArray[" << i << "]";
    }
    return true;
}

static bool validateScalarDimensions(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.dimensionCount, 0u) << tag << " invalid dimensions for scalar type";
    NN_RET_CHECK(type.dimensions == nullptr) << tag << " invalid dimensions for scalar type";
    return true;
}

static bool validateQuant8AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 255)
            << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}

static bool validateQuant8SymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}

static bool validateQuant16AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 65535)
            << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}

static bool validateQuantSymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero";
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}

static bool validateNoQuantParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero";
    NN_RET_CHECK_EQ(type.scale, 0.f) << tag << " scale is not zero";
    return true;
}

static bool validateTensorDimensions(const ANeuralNetworksOperandType& type, const char* tag,
                                     bool allowPartial) {
    if (allowPartial) {
        return true;
    }
    NN_RET_CHECK_GT(type.dimensionCount, 0u) << tag << " invalid operand dimensions";
    for (uint32_t i = 0; i < type.dimensionCount; i++) {
        NN_RET_CHECK_NE(type.dimensions[i], 0u) << tag << " invalid operand dimensions";
    }
    return true;
}

static bool validateOperandTypeHelper(
        const ANeuralNetworksOperandType& type,
        const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag,
        bool allowPartial) {
    NN_RET_CHECK_EQ(type.dimensionCount == 0, type.dimensions == nullptr);
    if (isExtensionOperandType(type.type)) {
        NN_RET_CHECK(extensionOperandTypeInfo != nullptr);
        if (extensionOperandTypeInfo->isTensor) {
            NN_RET_CHECK(validateTensorDimensions(type, tag, allowPartial));
        } else {
            NN_RET_CHECK(validateScalarDimensions(type, tag));
        }
        return validateNoQuantParams(type, tag);
    }

    NN_RET_CHECK(extensionOperandTypeInfo == nullptr);
    NN_RET_CHECK(validCode(kNumberOfDataTypes, kNumberOfDataTypesOEM, type.type))
            << tag << " invalid OperandType: " << type.type;

    bool isScalar = tableLookup(kScalarDataType, kScalarDataTypeOEM, type.type);
    if (isScalar) {
        NN_RET_CHECK(validateScalarDimensions(type, tag));
        if (type.type != ANEURALNETWORKS_OEM_SCALAR) {  // Historically, we have allowed OEM types
                                                        // to use quantization parameters.
            NN_RET_CHECK(validateNoQuantParams(type, tag));
        }
    } else {
        NN_RET_CHECK(validateTensorDimensions(type, tag, allowPartial));
        if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
            NN_RET_CHECK(validateQuant8AsymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_SYMM) {
            NN_RET_CHECK(validateQuant8SymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_ASYMM) {
            NN_RET_CHECK(validateQuant16AsymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_SYMM) {
            NN_RET_CHECK(validateQuantSymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_INT32) {
            // TODO(b/119869082): TENSOR_INT32 should not use quantization parameters.
        } else if (type.type == ANEURALNETWORKS_TENSOR_OEM_BYTE) {
            // Historically, we have allowed OEM types to use quantization parameters.
        } else {
            NN_RET_CHECK(validateNoQuantParams(type, tag));
        }
    }

    return true;
}

int validateOperandType(const ANeuralNetworksOperandType& type,
                        const Extension::OperandTypeInformation* const extensionOperandTypeInfo,
                        const char* tag, bool allowPartial) {
    return validateOperandTypeHelper(type, extensionOperandTypeInfo, tag, allowPartial)
                   ? ANEURALNETWORKS_NO_ERROR
                   : ANEURALNETWORKS_BAD_DATA;
}

int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
                        const char* tag) {
    for (uint32_t i = 0; i < count; i++) {
        if (list[i] >= operandCount) {
            LOG(ERROR) << tag << " invalid operand index at " << i << " = " << list[i]
                       << ", operandCount " << operandCount;
            return ANEURALNETWORKS_BAD_DATA;
        }
    }
    return ANEURALNETWORKS_NO_ERROR;
}

int validateOperationOperandTypes(const std::vector<Operand>& operands, uint32_t inOperandCount,
                                  const uint32_t* inOperandIndexes,
                                  const std::vector<OperandType>& inExpectedTypes,
                                  uint32_t outOperandCount, const uint32_t* outOperandIndexes,
                                  const std::vector<OperandType>& outExpectedInTypes) {
    if (inOperandCount != static_cast<uint32_t>(inExpectedTypes.size()) ||
        outOperandCount != static_cast<uint32_t>(outExpectedInTypes.size())) {
        LOG(ERROR) << "Wrong operand count: expected " << inExpectedTypes.size() << " inputs and "
                   << outExpectedInTypes.size() << " outputs, got " << inOperandCount
                   << " inputs and " << outOperandCount << " outputs";
        return ANEURALNETWORKS_BAD_DATA;
    }
    for (uint32_t i = 0; i < inOperandCount; i++) {
        if (operands[inOperandIndexes[i]].type != inExpectedTypes[i]) {
            LOG(ERROR) << "Invalid input tensor type "
                       << toString(operands[inOperandIndexes[i]].type) << " for input " << i
                       << ", expected " << toString(inExpectedTypes[i]);
            return ANEURALNETWORKS_BAD_DATA;
        }
    }
    for (uint32_t i = 0; i < outOperandCount; i++) {
        if (operands[outOperandIndexes[i]].type != outExpectedInTypes[i]) {
            LOG(ERROR) << "Invalid output tensor type "
                       << toString(operands[outOperandIndexes[i]].type) << " for output " << i
                       << ", expected " << toString(outExpectedInTypes[i]);
            return ANEURALNETWORKS_BAD_DATA;
        }
    }
    return ANEURALNETWORKS_NO_ERROR;
}
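
// Returns ANEURALNETWORKS_BAD_DATA when the operation signature being
// validated requires a newer HAL version than the one we are validating
// against.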
static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion halVersion,
                              HalVersion minSupportedHalVersion) {
    if (halVersion < minSupportedHalVersion) {
        LOG(ERROR) << "The given inputs and outputs for operation " << getOperationName(opType)
                   << " are only supported in " << toString(minSupportedHalVersion)
                   << " and later (validating using " << toString(halVersion) << ")";
        return ANEURALNETWORKS_BAD_DATA;
    }
    return ANEURALNETWORKS_NO_ERROR;
}
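
// Validates a single operation: first checks that every operand index is in
// range, then applies per-operation checks on operand counts and types. The
// expected input and output type lists are built per signature, since many
// operations accept optional trailing inputs (e.g. data layout flags or
// dilation factors) that raise the minimum required HAL version.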
int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
                      const uint32_t* inputIndexes, uint32_t outputCount,
                      const uint32_t* outputIndexes, const std::vector<Operand>& operands,
                      HalVersion halVersion) {
    NN_RETURN_IF_ERROR(validateOperandList(inputCount, inputIndexes,
                                           static_cast<uint32_t>(operands.size()),
                                           "ANeuralNetworksModel_addOperation inputs"));
    NN_RETURN_IF_ERROR(validateOperandList(outputCount, outputIndexes,
                                           static_cast<uint32_t>(operands.size()),
                                           "ANeuralNetworksModel_addOperation outputs"));

    if (isExtensionOperationType(opType)) {
        if (halVersion < HalVersion::V1_2) {
            LOG(ERROR)
                    << "Extension operations are supported since HAL version 1.2, validating using "
                    << toString(halVersion);
            return ANEURALNETWORKS_BAD_DATA;
        }
        // There is no other validation we can do for an extension operation.
        return ANEURALNETWORKS_NO_ERROR;
    }

    auto logInvalidInOutNumber = [opType, inputCount, outputCount](int expIn, int expOut) {
        LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected " << expIn
                   << ") or output operands (" << outputCount << ", expected " << expOut
                   << ") for operation " << getOperationName(opType);
    };

    switch (opType) {
        case ANEURALNETWORKS_OEM_OPERATION: {
            return ANEURALNETWORKS_NO_ERROR;
        }
        case ANEURALNETWORKS_FLOOR: {
            if (inputCount != 1 || outputCount != 1) {
                logInvalidInOutNumber(1, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_FLOAT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {OperandType::TENSOR_FLOAT16};
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_DEPTHWISE_CONV_2D: {
            if ((inputCount != 14 && inputCount != 12 && inputCount != 11 && inputCount != 9 &&
                 inputCount != 8) ||
                outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 14, 12, 11, 9 or 8) or output operands (" << outputCount
                           << ", expected 1) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            auto filterType = operands[inputIndexes[1]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_FLOAT32, OperandType::INT32,
                        OperandType::INT32,          OperandType::INT32,
                        OperandType::INT32,          OperandType::INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_FLOAT16, OperandType::INT32,
                        OperandType::INT32,          OperandType::INT32,
                        OperandType::INT32,          OperandType::INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                if (filterType != OperandType::TENSOR_QUANT8_ASYMM &&
                    filterType != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
                    LOG(ERROR) << "Unsupported filter tensor type for operation "
                               << getOperationName(opType);
                    return ANEURALNETWORKS_BAD_DATA;
                }
                inExpectedTypes = {
                        OperandType::TENSOR_QUANT8_ASYMM,
                        filterType,
                        OperandType::TENSOR_INT32,
                        OperandType::INT32,
                        OperandType::INT32,
                        OperandType::INT32,
                        OperandType::INT32,
                        OperandType::INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }

            // NeuralNetworks.h specifies that ANEURALNETWORKS_DEPTHWISE_CONV_2D's output must
            // meet "outputScale > inputScale * filterScale" for the operand type
            // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM before API level 29. For other
            // operand types (e.g., ANEURALNETWORKS_TENSOR_FLOAT32), this constraint
            // does not apply, so by default the constraint is met.
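            // For example, with inputScale = 0.5f and filterScale = 0.5f, the product is
            // 0.25f, so a model whose outputScale is 0.25f or less fails the constraint
            // and (per the check below) is only valid from HAL version 1.2 onward.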
            bool meetsQuantizedScaleConstraintBeforeV1_2 = true;
            if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                const float inputScale = operands[inputIndexes[0]].scale;
                const float filterScale = operands[inputIndexes[1]].scale;
                const float outputScale = operands[outputIndexes[0]].scale;
                meetsQuantizedScaleConstraintBeforeV1_2 = (outputScale > inputScale * filterScale);
            }

            bool withExplicitPadding = false;
            bool withLayout = false;
            bool withDilation = false;
            if (inputCount >= 9) {
                if (operands[inputIndexes[8]].type == OperandType::INT32 && inputCount >= 11) {
                    std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
                    inExpectedTypes.insert(inExpectedTypes.end(), explicitScalarTypes.begin(),
                                           explicitScalarTypes.end());
                    withExplicitPadding = true;
                }
                int inputOffset = withExplicitPadding ? 3 : 0;
                if (inputCount >= 9 + inputOffset) {
                    inExpectedTypes.push_back(OperandType::BOOL);
                    withLayout = true;
                }
                if (inputCount == 10 + inputOffset) {
                    LOG(ERROR) << "Provided only one dilation factor value, two values are "
                                  "required for operation "
                               << getOperationName(opType);
                    return ANEURALNETWORKS_BAD_DATA;
                }
                if (inputCount == 11 + inputOffset) {
                    inExpectedTypes.push_back(OperandType::INT32);
                    inExpectedTypes.push_back(OperandType::INT32);
                    withDilation = true;
                }
            }

            if (inputType == OperandType::TENSOR_FLOAT16 ||
                filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL || withLayout ||
                withDilation || !meetsQuantizedScaleConstraintBeforeV1_2) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            }

            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION: {
            if ((inputCount != 6 && inputCount != 5) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 6 or 5) or output operands (" << outputCount
                           << ", expected 1) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32, OperandType::INT32, OperandType::FLOAT32,
                        OperandType::FLOAT32,        OperandType::FLOAT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16, OperandType::INT32, OperandType::FLOAT16,
                        OperandType::FLOAT16,        OperandType::FLOAT16,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            if (inputCount == 6) {
                inExpectedTypes.push_back(OperandType::INT32);
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else if (operands[inputIndexes[0]].dimensions.size() != 4) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_RESHAPE: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_DEPTH_TO_SPACE: {
            if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 3 or 2) or output operands (" << outputCount
                           << ", expected 1) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            if (inputCount == 3) {
                inExpectedTypes.push_back(OperandType::BOOL);
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SPACE_TO_DEPTH: {
            if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 3 or 2) or output operands (" << outputCount
                           << ", expected 1) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            if (inputCount == 3) {
                inExpectedTypes.push_back(OperandType::BOOL);
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_EMBEDDING_LOOKUP: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[1]].type;
            if (inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_INT32 &&
                inputType != OperandType::TENSOR_QUANT8_ASYMM) {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32, inputType};
            std::vector<OperandType> outExpectedTypes = {inputType};
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_HASHTABLE_LOOKUP: {
            if (inputCount != 3 || outputCount != 2) {
                logInvalidInOutNumber(3, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[2]].type;
            if (inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_INT32 &&
                inputType != OperandType::TENSOR_QUANT8_ASYMM) {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32,
                                                        OperandType::TENSOR_INT32, inputType};
            std::vector<OperandType> outExpectedTypes = {inputType,
                                                         OperandType::TENSOR_QUANT8_ASYMM};
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_LSH_PROJECTION: {
            if (inputCount != 4 || outputCount != 1) {
                logInvalidInOutNumber(4, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[1]].type;
            if (inputType != OperandType::TENSOR_FLOAT16 &&
                inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_INT32 &&
                inputType != OperandType::TENSOR_QUANT8_ASYMM) {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto hashType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            if (hashType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        inputType,
                        OperandType::TENSOR_FLOAT16,
                        OperandType::INT32,
                };
            } else if (hashType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        inputType,
                        OperandType::TENSOR_FLOAT32,
                        OperandType::INT32,
                };
            } else {
                LOG(ERROR) << "Unsupported hash tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM: {
            std::vector<OperandType> inExpectedTypes;
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> outExpectedTypes{inputType, inputType};
            std::vector<OperandType> outExpectedTypesMerged{inputType};
            if (inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_FLOAT16) {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            inExpectedTypes = {};
            for (int i = 0; i < 48; ++i) {
                inExpectedTypes.push_back(inputType);
            }
            inExpectedTypes.push_back(OperandType::INT32);
            inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32
                                              ? OperandType::FLOAT32
                                              : OperandType::FLOAT16);
            inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32
                                              ? OperandType::FLOAT32
                                              : OperandType::FLOAT16);
            inExpectedTypes.push_back(OperandType::BOOL);
            inExpectedTypes.push_back(OperandType::BOOL);
            for (int i = 0; i < 8; ++i) {
                inExpectedTypes.push_back(inputType);
            }
            if (inputCount != 61 || (outputCount != 1 && outputCount != 2)) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 61) or output operands (" << outputCount
                           << ", expected 1 or 2) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            // The forward and backward outputs may be produced separately or merged
            // into one; accept whichever output type list matches.
            auto status = validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                        inExpectedTypes, outputCount, outputIndexes,
                                                        outExpectedTypes);
            if (status != ANEURALNETWORKS_NO_ERROR) {
                status = validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                       inExpectedTypes, outputCount, outputIndexes,
                                                       outExpectedTypesMerged);
            }
            return status;
        }
        case ANEURALNETWORKS_LSTM: {
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            auto inputType = operands[inputIndexes[0]].type;
            if (inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_FLOAT16) {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }

            inExpectedTypes = {inputType, inputType, inputType, inputType, inputType,
                               inputType, inputType, inputType, inputType, inputType,
                               inputType, inputType, inputType, inputType, inputType,
                               inputType, inputType, inputType, inputType, inputType,
                               OperandType::INT32};
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes.push_back(OperandType::FLOAT32);
                inExpectedTypes.push_back(OperandType::FLOAT32);
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes.push_back(OperandType::FLOAT16);
                inExpectedTypes.push_back(OperandType::FLOAT16);
            }

            outExpectedTypes = {inputType, inputType, inputType, inputType};
            if (inputCount == 23 && outputCount == 4) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            } else if (inputCount == 27 && outputCount == 4) {
                // The 27-input signature appends 4 more tensors of the input type
                // and requires HAL version 1.2 or later.
                for (int i = 0; i < 4; ++i) {
                    inExpectedTypes.push_back(inputType);
                }
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 23 or 27) or output operands (" << outputCount
                           << ", expected 4) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_QUANTIZED_16BIT_LSTM: {
            if (inputCount != 15 || outputCount != 2) {
                logInvalidInOutNumber(15, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            std::vector<OperandType> inExpectedTypes = {
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32,
                    OperandType::TENSOR_INT32,        OperandType::TENSOR_INT32,
                    OperandType::TENSOR_INT32,        OperandType::TENSOR_QUANT16_SYMM,
                    OperandType::TENSOR_QUANT8_ASYMM};
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_QUANT16_SYMM,
                                                         OperandType::TENSOR_QUANT8_ASYMM};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_RANDOM_MULTINOMIAL: {
            if (inputCount != 3 || outputCount != 1) {
                logInvalidInOutNumber(3, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            OperandType inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32 ||
                inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        inputType,
                        OperandType::INT32,
                        OperandType::TENSOR_INT32,
                };
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_RNN: {
            if (inputCount != 6 || outputCount != 2) {
                logInvalidInOutNumber(6, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            OperandType inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_FLOAT32, OperandType::INT32,
                };
                outExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_FLOAT32,
                };
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_FLOAT16, OperandType::INT32,
                };
                outExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_FLOAT16,
                };
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SVDF: {
            if (inputCount != 7 || outputCount != 2) {
                logInvalidInOutNumber(7, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            OperandType inputType = operands[inputIndexes[0]].type;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes = {
                    inputType, inputType,          inputType,          inputType,
                    inputType, OperandType::INT32, OperandType::INT32,
            };
            std::vector<OperandType> outExpectedTypes = {inputType, inputType};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
  1061. case ANEURALNETWORKS_BATCH_TO_SPACE_ND: {
  1062. if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
  1063. LOG(ERROR) << "Invalid number of input operands (" << inputCount
  1064. << ", expected 3 or 2) or output operands (" << outputCount
  1065. << ", expected 1) for operation " << getOperationName(opType);
  1066. return ANEURALNETWORKS_BAD_DATA;
  1067. }
  1068. auto inputType = operands[inputIndexes[0]].type;
  1069. std::vector<OperandType> inExpectedTypes;
  1070. std::vector<OperandType> outExpectedTypes;
  1071. if (inputType == OperandType::TENSOR_FLOAT32) {
  1072. inExpectedTypes = {
  1073. OperandType::TENSOR_FLOAT32,
  1074. OperandType::TENSOR_INT32,
  1075. };
  1076. outExpectedTypes = {OperandType::TENSOR_FLOAT32};
  1077. } else if (inputType == OperandType::TENSOR_FLOAT16) {
  1078. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
  1079. inExpectedTypes = {
  1080. OperandType::TENSOR_FLOAT16,
  1081. OperandType::TENSOR_INT32,
  1082. };
  1083. outExpectedTypes = {OperandType::TENSOR_FLOAT16};
  1084. } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
  1085. inExpectedTypes = {
  1086. OperandType::TENSOR_QUANT8_ASYMM,
  1087. OperandType::TENSOR_INT32,
  1088. };
  1089. outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
  1090. } else {
  1091. LOG(ERROR) << "Unsupported input tensor type for operation "
  1092. << getOperationName(opType);
  1093. return ANEURALNETWORKS_BAD_DATA;
  1094. }
  1095. if (inputCount == 3) {
  1096. inExpectedTypes.push_back(OperandType::BOOL);
  1097. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
  1098. } else {
  1099. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
  1100. }
  1101. return validateOperationOperandTypes(operands,
  1102. inputCount, inputIndexes,
  1103. inExpectedTypes,
  1104. outputCount, outputIndexes,
  1105. outExpectedTypes);
  1106. }
  1107. case ANEURALNETWORKS_SPACE_TO_BATCH_ND: {
  1108. if ((inputCount != 4 && inputCount != 3) || outputCount != 1) {
  1109. LOG(ERROR) << "Invalid number of input operands (" << inputCount
  1110. << ", expected 4 or 3) or output operands (" << outputCount
  1111. << ", expected 1) for operation " << getOperationName(opType);
  1112. return ANEURALNETWORKS_BAD_DATA;
  1113. }
  1114. auto inputType = operands[inputIndexes[0]].type;
  1115. std::vector<OperandType> inExpectedTypes;
  1116. std::vector<OperandType> outExpectedTypes;
  1117. if (inputType == OperandType::TENSOR_FLOAT32) {
  1118. inExpectedTypes = {
  1119. OperandType::TENSOR_FLOAT32,
  1120. OperandType::TENSOR_INT32,
  1121. OperandType::TENSOR_INT32,
  1122. };
  1123. outExpectedTypes = {OperandType::TENSOR_FLOAT32};
  1124. } else if (inputType == OperandType::TENSOR_FLOAT16) {
  1125. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
  1126. inExpectedTypes = {
  1127. OperandType::TENSOR_FLOAT16,
  1128. OperandType::TENSOR_INT32,
  1129. OperandType::TENSOR_INT32,
  1130. };
  1131. outExpectedTypes = {OperandType::TENSOR_FLOAT16};
  1132. } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
  1133. if (operands[inputIndexes[0]].zeroPoint != 0) {
  1134. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
  1135. }
  1136. inExpectedTypes = {
  1137. OperandType::TENSOR_QUANT8_ASYMM,
  1138. OperandType::TENSOR_INT32,
  1139. OperandType::TENSOR_INT32,
  1140. };
  1141. outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
  1142. } else {
  1143. LOG(ERROR) << "Unsupported input tensor type for operation "
  1144. << getOperationName(opType);
  1145. return ANEURALNETWORKS_BAD_DATA;
  1146. }
  1147. if (inputCount == 4) {
  1148. inExpectedTypes.push_back(OperandType::BOOL);
  1149. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
  1150. } else {
  1151. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
  1152. }
  1153. return validateOperationOperandTypes(operands,
  1154. inputCount, inputIndexes,
  1155. inExpectedTypes,
  1156. outputCount, outputIndexes,
  1157. outExpectedTypes);
  1158. }
  1159. case ANEURALNETWORKS_PAD: {
  1160. if (inputCount != 2 || outputCount != 1) {
  1161. logInvalidInOutNumber(2, 1);
  1162. return ANEURALNETWORKS_BAD_DATA;
  1163. }
  1164. auto inputType = operands[inputIndexes[0]].type;
  1165. std::vector<OperandType> inExpectedTypes;
  1166. std::vector<OperandType> outExpectedTypes;
  1167. if (inputType == OperandType::TENSOR_FLOAT32) {
  1168. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
  1169. inExpectedTypes = {
  1170. OperandType::TENSOR_FLOAT32,
  1171. OperandType::TENSOR_INT32,
  1172. };
  1173. outExpectedTypes = {OperandType::TENSOR_FLOAT32};
  1174. } else if (inputType == OperandType::TENSOR_FLOAT16) {
  1175. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
  1176. inExpectedTypes = {
  1177. OperandType::TENSOR_FLOAT16,
  1178. OperandType::TENSOR_INT32,
  1179. };
  1180. outExpectedTypes = {OperandType::TENSOR_FLOAT16};
  1181. } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
  1182. if (operands[inputIndexes[0]].zeroPoint == 0) {
  1183. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
  1184. } else {
  1185. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
  1186. }
  1187. inExpectedTypes = {
  1188. OperandType::TENSOR_QUANT8_ASYMM,
  1189. OperandType::TENSOR_INT32,
  1190. };
  1191. outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
  1192. } else {
  1193. LOG(ERROR) << "Unsupported input tensor type for operation "
  1194. << getOperationName(opType);
  1195. return ANEURALNETWORKS_BAD_DATA;
  1196. }
  1197. return validateOperationOperandTypes(operands, inputCount, inputIndexes,
  1198. inExpectedTypes, outputCount, outputIndexes,
  1199. outExpectedTypes);
  1200. }
  1201. case ANEURALNETWORKS_PAD_V2: {
  1202. if (inputCount != 3 || outputCount != 1) {
  1203. logInvalidInOutNumber(3, 1);
  1204. return ANEURALNETWORKS_BAD_DATA;
  1205. }
  1206. auto inputType = operands[inputIndexes[0]].type;
  1207. std::vector<OperandType> inExpectedTypes;
  1208. std::vector<OperandType> outExpectedTypes;
  1209. if (inputType == OperandType::TENSOR_FLOAT32) {
  1210. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
  1211. inExpectedTypes = {
  1212. OperandType::TENSOR_FLOAT32,
  1213. OperandType::TENSOR_INT32,
  1214. OperandType::FLOAT32,
  1215. };
  1216. outExpectedTypes = {OperandType::TENSOR_FLOAT32};
  1217. } else if (inputType == OperandType::TENSOR_FLOAT16) {
  1218. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
  1219. inExpectedTypes = {
  1220. OperandType::TENSOR_FLOAT16,
  1221. OperandType::TENSOR_INT32,
  1222. OperandType::FLOAT16,
  1223. };
  1224. outExpectedTypes = {OperandType::TENSOR_FLOAT16};
  1225. } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
  1226. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
  1227. inExpectedTypes = {
  1228. OperandType::TENSOR_QUANT8_ASYMM,
  1229. OperandType::TENSOR_INT32,
  1230. OperandType::INT32,
  1231. }; // TODO(b/116699425): Make it UINT8.
  1232. outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
  1233. } else {
  1234. LOG(ERROR) << "Unsupported input tensor type for operation "
  1235. << getOperationName(opType);
  1236. return ANEURALNETWORKS_BAD_DATA;
  1237. }
  1238. return validateOperationOperandTypes(operands, inputCount, inputIndexes,
  1239. inExpectedTypes, outputCount, outputIndexes,
  1240. outExpectedTypes);
  1241. }
  1242. case ANEURALNETWORKS_CAST: {
  1243. if (inputCount != 1 || outputCount != 1) {
  1244. logInvalidInOutNumber(1, 1);
  1245. return ANEURALNETWORKS_BAD_DATA;
  1246. }
  1247. auto inputType = operands[inputIndexes[0]].type;
  1248. auto outputType = operands[outputIndexes[0]].type;
  1249. std::vector<OperandType> inExpectedTypes;
  1250. if (inputType == OperandType::TENSOR_FLOAT16 ||
  1251. inputType == OperandType::TENSOR_FLOAT32 ||
  1252. inputType == OperandType::TENSOR_INT32 ||
  1253. inputType == OperandType::TENSOR_QUANT8_ASYMM) {
  1254. inExpectedTypes = {inputType};
  1255. } else {
  1256. LOG(ERROR) << "Unsupported input tensor type for operation "
  1257. << getOperationName(opType);
  1258. return ANEURALNETWORKS_BAD_DATA;
  1259. }
  1260. std::vector<OperandType> outExpectedTypes;
  1261. if (outputType == OperandType::TENSOR_FLOAT16 ||
  1262. outputType == OperandType::TENSOR_FLOAT32 ||
  1263. outputType == OperandType::TENSOR_INT32 ||
  1264. outputType == OperandType::TENSOR_QUANT8_ASYMM) {
  1265. outExpectedTypes = {outputType};
  1266. } else {
  1267. LOG(ERROR) << "Unsupported output tensor type for operation "
  1268. << getOperationName(opType);
  1269. return ANEURALNETWORKS_BAD_DATA;
  1270. }
  1271. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
  1272. return validateOperationOperandTypes(operands, inputCount, inputIndexes,
  1273. inExpectedTypes, outputCount, outputIndexes,
  1274. outExpectedTypes);
  1275. }
  1276. case ANEURALNETWORKS_SQUEEZE: {
  1277. if (inputCount != 2 || outputCount != 1) {
  1278. logInvalidInOutNumber(2, 1);
  1279. return ANEURALNETWORKS_BAD_DATA;
  1280. }
  1281. auto inputType = operands[inputIndexes[0]].type;
  1282. std::vector<OperandType> inExpectedTypes;
  1283. std::vector<OperandType> outExpectedTypes;
  1284. if (inputType == OperandType::TENSOR_FLOAT32) {
  1285. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
  1286. inExpectedTypes = {OperandType::TENSOR_FLOAT32,
  1287. OperandType::TENSOR_INT32};
  1288. outExpectedTypes = {OperandType::TENSOR_FLOAT32};
  1289. } else if (inputType == OperandType::TENSOR_FLOAT16) {
  1290. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
  1291. inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32};
  1292. outExpectedTypes = {OperandType::TENSOR_FLOAT16};
  1293. } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
  1294. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
  1295. inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
  1296. OperandType::TENSOR_INT32};
  1297. outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
  1298. } else {
  1299. LOG(ERROR) << "Unsupported input tensor type for operation "
  1300. << getOperationName(opType);
  1301. return ANEURALNETWORKS_BAD_DATA;
  1302. }
  1303. return validateOperationOperandTypes(operands,
  1304. inputCount, inputIndexes,
  1305. inExpectedTypes,
  1306. outputCount, outputIndexes,
  1307. outExpectedTypes);
  1308. }
  1309. case ANEURALNETWORKS_STRIDED_SLICE: {
  1310. if (inputCount != 7 || outputCount != 1) {
  1311. logInvalidInOutNumber(7, 1);
  1312. return ANEURALNETWORKS_BAD_DATA;
  1313. }
  1314. auto inputType = operands[inputIndexes[0]].type;
  1315. std::vector<OperandType> inExpectedTypes;
  1316. std::vector<OperandType> outExpectedTypes;
  1317. if (inputType == OperandType::TENSOR_FLOAT32) {
  1318. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
  1319. inExpectedTypes = {
  1320. OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32,
  1321. OperandType::TENSOR_INT32, OperandType::TENSOR_INT32,
  1322. OperandType::INT32, OperandType::INT32,
  1323. OperandType::INT32,
  1324. };
  1325. outExpectedTypes = {OperandType::TENSOR_FLOAT32};
  1326. } else if (inputType == OperandType::TENSOR_FLOAT16) {
  1327. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
  1328. inExpectedTypes = {
  1329. OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32,
  1330. OperandType::TENSOR_INT32, OperandType::TENSOR_INT32,
  1331. OperandType::INT32, OperandType::INT32,
  1332. OperandType::INT32,
  1333. };
  1334. outExpectedTypes = {OperandType::TENSOR_FLOAT16};
  1335. } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
  1336. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
  1337. inExpectedTypes = {
  1338. OperandType::TENSOR_QUANT8_ASYMM,
  1339. OperandType::TENSOR_INT32,
  1340. OperandType::TENSOR_INT32,
  1341. OperandType::TENSOR_INT32,
  1342. OperandType::INT32,
  1343. OperandType::INT32,
  1344. OperandType::INT32,
  1345. };
  1346. outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
  1347. } else {
  1348. LOG(ERROR) << "Unsupported input tensor type for operation "
  1349. << getOperationName(opType);
  1350. return ANEURALNETWORKS_BAD_DATA;
  1351. }
  1352. return validateOperationOperandTypes(operands, inputCount, inputIndexes,
  1353. inExpectedTypes, outputCount, outputIndexes,
  1354. outExpectedTypes);
  1355. }
  1356. case ANEURALNETWORKS_MEAN: {
  1357. if (inputCount != 3 || outputCount != 1) {
  1358. logInvalidInOutNumber(3, 1);
  1359. return ANEURALNETWORKS_BAD_DATA;
  1360. }
  1361. auto inputType = operands[inputIndexes[0]].type;
  1362. std::vector<OperandType> inExpectedTypes;
  1363. std::vector<OperandType> outExpectedTypes;
  1364. if (inputType == OperandType::TENSOR_FLOAT32) {
  1365. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
  1366. inExpectedTypes = {OperandType::TENSOR_FLOAT32,
  1367. OperandType::TENSOR_INT32,
  1368. OperandType::INT32};
  1369. outExpectedTypes = {OperandType::TENSOR_FLOAT32};
  1370. } else if (inputType == OperandType::TENSOR_FLOAT16) {
  1371. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
  1372. inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32,
  1373. OperandType::INT32};
  1374. outExpectedTypes = {OperandType::TENSOR_FLOAT16};
  1375. } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
  1376. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
  1377. inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
  1378. OperandType::TENSOR_INT32,
  1379. OperandType::INT32};
  1380. outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
  1381. } else {
  1382. LOG(ERROR) << "Unsupported input tensor type for operation "
  1383. << getOperationName(opType);
  1384. return ANEURALNETWORKS_BAD_DATA;
  1385. }
  1386. return validateOperationOperandTypes(operands,
  1387. inputCount, inputIndexes,
  1388. inExpectedTypes,
  1389. outputCount, outputIndexes,
  1390. outExpectedTypes);
  1391. }
  1392. case ANEURALNETWORKS_ARGMAX:
  1393. case ANEURALNETWORKS_ARGMIN: {
  1394. if (inputCount != 2 || outputCount != 1) {
  1395. logInvalidInOutNumber(2, 1);
  1396. return ANEURALNETWORKS_BAD_DATA;
  1397. }
  1398. auto inputType = operands[inputIndexes[0]].type;
  1399. std::vector<OperandType> inExpectedTypes;
  1400. std::vector<OperandType> outExpectedTypes;
  1401. if (inputType == OperandType::TENSOR_FLOAT16 ||
  1402. inputType == OperandType::TENSOR_FLOAT32 ||
  1403. inputType == OperandType::TENSOR_INT32 ||
  1404. inputType == OperandType::TENSOR_QUANT8_ASYMM) {
  1405. inExpectedTypes = {inputType, OperandType::INT32};
  1406. outExpectedTypes = {OperandType::TENSOR_INT32};
  1407. } else {
  1408. LOG(ERROR) << "Unsupported input tensor type for operation "
  1409. << getOperationName(opType);
  1410. return ANEURALNETWORKS_BAD_DATA;
  1411. }
  1412. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
  1413. return validateOperationOperandTypes(operands, inputCount, inputIndexes,
  1414. inExpectedTypes, outputCount, outputIndexes,
  1415. outExpectedTypes);
  1416. }
  1417. case ANEURALNETWORKS_EXPAND_DIMS: {
  1418. if (inputCount != 2 || outputCount != 1) {
  1419. logInvalidInOutNumber(2, 1);
  1420. return ANEURALNETWORKS_BAD_DATA;
  1421. }
  1422. auto inputType = operands[inputIndexes[0]].type;
  1423. std::vector<OperandType> inExpectedTypes;
  1424. std::vector<OperandType> outExpectedTypes;
  1425. if (inputType == OperandType::TENSOR_FLOAT16 ||
  1426. inputType == OperandType::TENSOR_FLOAT32 ||
  1427. inputType == OperandType::TENSOR_INT32 ||
  1428. inputType == OperandType::TENSOR_QUANT8_ASYMM) {
  1429. inExpectedTypes = {inputType, OperandType::INT32};
  1430. outExpectedTypes = {inputType};
  1431. } else {
  1432. LOG(ERROR) << "Unsupported input tensor type for operation "
  1433. << getOperationName(opType);
  1434. return ANEURALNETWORKS_BAD_DATA;
  1435. }
  1436. NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
  1437. return validateOperationOperandTypes(operands, inputCount, inputIndexes,
  1438. inExpectedTypes, outputCount, outputIndexes,
  1439. outExpectedTypes);
  1440. }
        case ANEURALNETWORKS_SPLIT: {
            if (inputCount != 3) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 3) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            if (inputType != OperandType::TENSOR_FLOAT16 &&
                inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_INT32 &&
                inputType != OperandType::TENSOR_QUANT8_ASYMM) {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes = {inputType, OperandType::INT32,
                                                        OperandType::INT32};
            std::vector<OperandType> outExpectedTypes(outputCount, inputType);
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_MAXIMUM:
        case ANEURALNETWORKS_MINIMUM: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            OperandType inputType = operands[inputIndexes[0]].type;
            if (inputType == OperandType::TENSOR_FLOAT16 ||
                inputType == OperandType::TENSOR_FLOAT32 ||
                inputType == OperandType::TENSOR_INT32 ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {inputType, inputType};
                outExpectedTypes = {inputType};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_GROUPED_CONV_2D: {
            if ((inputCount != 12 && inputCount != 9) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 12 or 9) or output operands (" << outputCount
                           << ", expected 1) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            auto filterType = operands[inputIndexes[1]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_FLOAT32, OperandType::INT32,
                                   OperandType::INT32, OperandType::INT32,
                                   OperandType::INT32, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
                                   OperandType::TENSOR_FLOAT16, OperandType::INT32,
                                   OperandType::INT32, OperandType::INT32,
                                   OperandType::INT32, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                if (filterType != OperandType::TENSOR_QUANT8_ASYMM &&
                    filterType != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
                    LOG(ERROR) << "Unsupported filter tensor type for operation "
                               << getOperationName(opType);
                    return ANEURALNETWORKS_BAD_DATA;
                }
                if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL &&
                    operands[inputIndexes[1]].extraParams.channelQuant().channelDim != 0) {
                    LOG(ERROR) << "Unsupported filter tensor channel dimension for operation "
                               << getOperationName(opType);
                    return ANEURALNETWORKS_BAD_DATA;
                }
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                   filterType,
                                   OperandType::TENSOR_INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            if (inputCount == 12) {
                std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
                inExpectedTypes.insert(inExpectedTypes.end(), explicitScalarTypes.begin(),
                                       explicitScalarTypes.end());
            }
            inExpectedTypes.push_back(OperandType::BOOL);
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_TILE: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT16 ||
                inputType == OperandType::TENSOR_FLOAT32 ||
                inputType == OperandType::TENSOR_INT32 ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {inputType, OperandType::TENSOR_INT32};
                outExpectedTypes = {inputType};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_POW: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT16 ||
                inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {inputType, inputType};
                outExpectedTypes = {inputType};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_TOPK_V2: {
            if (inputCount != 2 || outputCount != 2) {
                logInvalidInOutNumber(2, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            OperandType inputType = operands[inputIndexes[0]].type;
            if (inputType == OperandType::TENSOR_FLOAT16 ||
                inputType == OperandType::TENSOR_FLOAT32 ||
                inputType == OperandType::TENSOR_INT32 ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {inputType, OperandType::INT32};
                outExpectedTypes = {inputType, OperandType::TENSOR_INT32};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        default: {
            const OperationRegistration* operationRegistration =
                    BuiltinOperationResolver::get()->findOperation(
                            static_cast<OperationType>(opType));
            if (operationRegistration == nullptr) {
                if (0 <= opType && opType < kNumberOfOperationTypes) {
                    LOG(ERROR) << getOperationName(opType) << " not registered";
                } else {
                    LOG(ERROR) << "Operation type " << opType << " out of the range [0, "
                               << kNumberOfOperationTypes << ")";
                }
                return ANEURALNETWORKS_UNEXPECTED_NULL;
            }
            if (operationRegistration->validate == nullptr) {
                LOG(ERROR) << "Incomplete operation registration: " << getOperationName(opType);
                return ANEURALNETWORKS_UNEXPECTED_NULL;
            }
            OperationValidationContext context(inputCount, inputIndexes, outputCount,
                                               outputIndexes, operands.data(), halVersion);
            if (!operationRegistration->validate(&context)) {
                LOG(ERROR) << "Validation failed for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return ANEURALNETWORKS_NO_ERROR;
        }
    }
}
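
// Illustrative call of validateOperation() (not part of the runtime; the
// operand indexes and the operand table 'operands' are hypothetical):
//
//     const uint32_t inIdx[] = {0, 1};  // two input operands of the same float type
//     const uint32_t outIdx[] = {2};    // one output operand of that type
//     int err = validateOperation(ANEURALNETWORKS_POW, 2, inIdx, 1, outIdx,
//                                 operands, HalVersion::V1_2);
//     // err == ANEURALNETWORKS_NO_ERROR only if the operand counts and types
//     // match the POW case above; shapes are not checked here.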
ErrorStatus convertResultCodeToErrorStatus(int resultCode) {
    switch (resultCode) {
        case ANEURALNETWORKS_NO_ERROR:
            return ErrorStatus::NONE;
        case ANEURALNETWORKS_BAD_DATA:
        case ANEURALNETWORKS_UNEXPECTED_NULL:
            return ErrorStatus::INVALID_ARGUMENT;
        case ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE:
            return ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
        case ANEURALNETWORKS_UNAVAILABLE_DEVICE:
            return ErrorStatus::DEVICE_UNAVAILABLE;
        case ANEURALNETWORKS_BAD_STATE:
        case ANEURALNETWORKS_INCOMPLETE:
        case ANEURALNETWORKS_OP_FAILED:
        case ANEURALNETWORKS_OUT_OF_MEMORY:
        case ANEURALNETWORKS_UNMAPPABLE:
            return ErrorStatus::GENERAL_FAILURE;
        default:
            LOG(ERROR) << "Unknown result code " << resultCode
                       << " mapped to ErrorStatus::GENERAL_FAILURE";
            return ErrorStatus::GENERAL_FAILURE;
    }
}
int convertErrorStatusToResultCode(ErrorStatus status) {
    switch (status) {
        case ErrorStatus::NONE:
            return ANEURALNETWORKS_NO_ERROR;
        case ErrorStatus::INVALID_ARGUMENT:
            return ANEURALNETWORKS_BAD_DATA;
        case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
            return ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE;
        case ErrorStatus::DEVICE_UNAVAILABLE:
            return ANEURALNETWORKS_UNAVAILABLE_DEVICE;
        case ErrorStatus::GENERAL_FAILURE:
            return ANEURALNETWORKS_OP_FAILED;
        default:
            LOG(ERROR) << "Unknown ErrorStatus " << toString(status)
                       << " mapped to ANEURALNETWORKS_OP_FAILED";
            return ANEURALNETWORKS_OP_FAILED;
    }
}
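
// Note that the two mappings above do not round-trip exactly: several result
// codes collapse to ErrorStatus::GENERAL_FAILURE, which converts back to
// ANEURALNETWORKS_OP_FAILED. For example:
//
//     int code = convertErrorStatusToResultCode(
//             convertResultCodeToErrorStatus(ANEURALNETWORKS_OUT_OF_MEMORY));
//     // code == ANEURALNETWORKS_OP_FAILED, not ANEURALNETWORKS_OUT_OF_MEMORY.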
// V1_2::Capabilities::operandPerformance utilities.
// The field V1_2::Capabilities::operandPerformance is a vector sorted by the
// field V1_2::Capabilities::OperandPerformance::type.

hidl_vec<Capabilities::OperandPerformance> nonExtensionOperandPerformance(PerformanceInfo perf) {
    using OpPerf = Capabilities::OperandPerformance;

    // Note: range presents enumerators in declaration order, not in numerical order.
    static constexpr ::android::hardware::hidl_enum_range<OperandType> kOperandTypeRange;

    hidl_vec<OpPerf> ret(kOperandTypeRange.end() - kOperandTypeRange.begin());
    std::transform(kOperandTypeRange.begin(), kOperandTypeRange.end(), ret.begin(),
                   [perf](OperandType type) {
                       return Capabilities::OperandPerformance{type, perf};
                   });
    std::sort(ret.begin(), ret.end(),
              [](const OpPerf& a, const OpPerf& b) { return a.type < b.type; });
    return ret;
}

void update(hidl_vec<Capabilities::OperandPerformance>* operandPerformance, OperandType type,
            PerformanceInfo perf) {
    CHECK(operandPerformance != nullptr);
    const auto it = std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type,
                                     [](const Capabilities::OperandPerformance& perf,
                                        OperandType type) { return perf.type < type; });
    CHECK(it != operandPerformance->end())
            << toString(type) << " not in " << toString(*operandPerformance);
    it->info = perf;
}

PerformanceInfo lookup(const hidl_vec<Capabilities::OperandPerformance>& operandPerformance,
                       OperandType type) {
    const auto it = std::lower_bound(operandPerformance.begin(), operandPerformance.end(), type,
                                     [](const Capabilities::OperandPerformance& perf,
                                        OperandType type) { return perf.type < type; });
    if (it == operandPerformance.end()) {
        LOG(WARNING) << "No PerformanceInfo for " << toString(type);
        return {.execTime = FLT_MAX, .powerUsage = FLT_MAX};
    } else {
        return it->info;
    }
}
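
// Typical use of the three utilities above (the performance values are made
// up for illustration):
//
//     V1_2::Capabilities caps;
//     caps.operandPerformance =
//             nonExtensionOperandPerformance({.execTime = 1.0f, .powerUsage = 1.0f});
//     update(&caps.operandPerformance, OperandType::TENSOR_FLOAT32,
//            {.execTime = 0.5f, .powerUsage = 0.5f});
//     PerformanceInfo p = lookup(caps.operandPerformance, OperandType::TENSOR_FLOAT32);
//     // p.execTime == 0.5f; every other operand type keeps the 1.0f defaults.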
// Versioning

// In Android P, most data types are treated as having the same performance as
// TENSOR_QUANT8_ASYMM. This array must be in sorted order.
static const OperandType kQuantized8PerformanceConsistentWithP[] = {
        OperandType::INT32, OperandType::UINT32, OperandType::TENSOR_INT32, OperandType::OEM,
        OperandType::TENSOR_OEM_BYTE};

static bool isQuantized8PerformanceConsistentWithP(const V1_2::Capabilities& capabilities) {
    const PerformanceInfo quantized8Performance =
            lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM);
    return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
                       std::end(kQuantized8PerformanceConsistentWithP),
                       [quantized8Performance, &capabilities](OperandType type) {
                           return quantized8Performance ==
                                  lookup(capabilities.operandPerformance, type);
                       });
}

static hidl_vec<V1_2::Capabilities::OperandPerformance> makeQuantized8PerformanceConsistentWithP(
        PerformanceInfo quantized8Performance) {
    hidl_vec<V1_2::Capabilities::OperandPerformance> ret(
            sizeof(kQuantized8PerformanceConsistentWithP) /
            sizeof(kQuantized8PerformanceConsistentWithP[0]));
    std::transform(
            std::begin(kQuantized8PerformanceConsistentWithP),
            std::end(kQuantized8PerformanceConsistentWithP), ret.begin(),
            [quantized8Performance](OperandType type) -> V1_2::Capabilities::OperandPerformance {
                return {type, quantized8Performance};
            });
    return ret;
}
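
// The invariant the two helpers above encode: on a P-era driver, INT32,
// UINT32, TENSOR_INT32, OEM, and TENSOR_OEM_BYTE all report the same
// performance as TENSOR_QUANT8_ASYMM. For example (values made up):
//
//     auto opPerf = makeQuantized8PerformanceConsistentWithP(
//             {.execTime = 2.0f, .powerUsage = 2.0f});
//     // lookup(opPerf, OperandType::INT32) and lookup(opPerf, OperandType::TENSOR_INT32)
//     // both yield {.execTime = 2.0f, .powerUsage = 2.0f}.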
bool compliantWithV1_0(const V1_0::Capabilities&) {
    return true;
}

bool compliantWithV1_0(const V1_1::Capabilities& capabilities) {
    return capabilities.relaxedFloat32toFloat16Performance == capabilities.float32Performance;
}

bool compliantWithV1_0(const V1_2::Capabilities& capabilities) {
    const PerformanceInfo perfTensorFloat32 =
            lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32);
    const PerformanceInfo perfFloat32 =
            lookup(capabilities.operandPerformance, OperandType::FLOAT32);
    if (perfTensorFloat32 != perfFloat32 ||
        perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
        perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) {
        return false;
    }
    return isQuantized8PerformanceConsistentWithP(capabilities);
}

bool compliantWithV1_1(const V1_0::Capabilities&) {
    return true;
}

bool compliantWithV1_1(const V1_1::Capabilities&) {
    return true;
}

bool compliantWithV1_1(const V1_2::Capabilities& capabilities) {
    if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
         capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
        (lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32) !=
         lookup(capabilities.operandPerformance, OperandType::FLOAT32))) {
        return false;
    }
    return isQuantized8PerformanceConsistentWithP(capabilities);
}

bool compliantWithV1_2(const V1_0::Capabilities&) {
    return true;
}

bool compliantWithV1_2(const V1_1::Capabilities&) {
    return true;
}

bool compliantWithV1_2(const V1_0::Model&) {
    return true;
}

bool compliantWithV1_0(const V1_1::Model& model) {
    // In addition to new enumeration values being introduced in V1_1::Model, a
    // new flag was introduced to indicate whether or not float32 data can be
    // calculated using float16 units. This 'relaxComputationFloat32toFloat16'
    // flag is not relevant in whether a V1_1::Model is compliant with a
    // V1_0::Model because all 1.0 drivers require strict calculation by default
    // in the P NN runtime. Even if fp16 calculations are allowed, they can
    // still be computed by a strict fp32 driver.
    return std::all_of(
            model.operations.begin(), model.operations.end(), [&model](const V1_1::Operation& op) {
                int error = validateOperation(static_cast<int32_t>(op.type), op.inputs.size(),
                                              op.inputs.size() > 0 ? op.inputs.data() : nullptr,
                                              op.outputs.size(),
                                              op.outputs.size() > 0 ? op.outputs.data() : nullptr,
                                              convertToV1_2(model.operands), HalVersion::V1_0);
                return error == ANEURALNETWORKS_NO_ERROR;
            });
}
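
// Example guard built from the check above (the V1_1::Model 'model' is
// hypothetical): downgrade only when every operation validates at HAL 1.0.
//
//     if (compliantWithV1_0(model)) {
//         V1_0::Model model10 = convertToV1_0(model);  // defined below
//         // hand model10 to a HAL 1.0 driver
//     }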
bool compliantWithV1_1(const V1_0::Model&) {
    return true;
}

bool compliantWithV1_1(const V1_1::Model&) {
    return true;
}

static V1_0::OperationType uncheckedConvertToV1_0(V1_1::OperationType type) {
    return static_cast<V1_0::OperationType>(type);
}

static V1_1::OperationType convertToV1_1(V1_0::OperationType type) {
    return static_cast<V1_1::OperationType>(type);
}

V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities) {
    return capabilities;
}

V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities) {
    if (!compliantWithV1_0(capabilities)) {
        LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
                   << " from V1_1::Capabilities to V1_0::Capabilities";
    }
    return {.float32Performance = capabilities.float32Performance,
            .quantized8Performance = capabilities.quantized8Performance};
}

V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities) {
    if (!compliantWithV1_0(capabilities)) {
        LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
                   << " from V1_2::Capabilities to V1_0::Capabilities";
    }
    return {.float32Performance =
                    lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32),
            .quantized8Performance =
                    lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM)};
}

V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities) {
    return {.float32Performance = capabilities.float32Performance,
            .quantized8Performance = capabilities.quantized8Performance,
            .relaxedFloat32toFloat16Performance = capabilities.float32Performance};
}

V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities) {
    return capabilities;
}

V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities) {
    if (!compliantWithV1_1(capabilities)) {
        LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
                   << " from V1_2::Capabilities to V1_1::Capabilities";
    }
    return {.float32Performance =
                    lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32),
            .quantized8Performance =
                    lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM),
            .relaxedFloat32toFloat16Performance =
                    capabilities.relaxedFloat32toFloat16PerformanceTensor};
}

V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities) {
    V1_2::Capabilities ret = {
            .relaxedFloat32toFloat16PerformanceScalar = capabilities.float32Performance,
            .relaxedFloat32toFloat16PerformanceTensor = capabilities.float32Performance,
            .operandPerformance =
                    makeQuantized8PerformanceConsistentWithP(capabilities.quantized8Performance)};
    auto& opPerf = ret.operandPerformance;
    opPerf.resize(opPerf.size() + 2);
    opPerf[opPerf.size() - 2] = {OperandType::TENSOR_FLOAT32, capabilities.float32Performance};
    opPerf[opPerf.size() - 1] = {OperandType::FLOAT32, capabilities.float32Performance};
    using OperandPerformance = V1_2::Capabilities::OperandPerformance;
    std::sort(opPerf.begin(), opPerf.end(),
              [](const OperandPerformance& a, const OperandPerformance& b) {
                  return a.type < b.type;
              });
    return ret;
}
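
// Sketch of what the conversion above produces (input values made up):
//
//     V1_0::Capabilities caps10 = {
//             .float32Performance = {.execTime = 0.5f, .powerUsage = 0.5f},
//             .quantized8Performance = {.execTime = 1.0f, .powerUsage = 1.0f}};
//     V1_2::Capabilities caps12 = convertToV1_2(caps10);
//     // lookup(caps12.operandPerformance, OperandType::TENSOR_FLOAT32) and
//     // lookup(caps12.operandPerformance, OperandType::FLOAT32) both equal
//     // caps10.float32Performance; the types listed in
//     // kQuantized8PerformanceConsistentWithP inherit caps10.quantized8Performance.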
V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities) {
    V1_2::Capabilities ret = {.relaxedFloat32toFloat16PerformanceScalar =
                                      capabilities.relaxedFloat32toFloat16Performance,
                              .relaxedFloat32toFloat16PerformanceTensor =
                                      capabilities.relaxedFloat32toFloat16Performance,
                              .operandPerformance = makeQuantized8PerformanceConsistentWithP(
                                      capabilities.quantized8Performance)};
    auto& opPerf = ret.operandPerformance;
    opPerf.resize(opPerf.size() + 2);
    opPerf[opPerf.size() - 2] = {OperandType::TENSOR_FLOAT32, capabilities.float32Performance};
    opPerf[opPerf.size() - 1] = {OperandType::FLOAT32, capabilities.float32Performance};
    using OperandPerformance = V1_2::Capabilities::OperandPerformance;
    std::sort(opPerf.begin(), opPerf.end(),
              [](const OperandPerformance& a, const OperandPerformance& b) {
                  return a.type < b.type;
              });
    return ret;
}

V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities) {
    return capabilities;
}

static V1_0::Operation uncheckedConvertToV1_0(const V1_1::Operation& operation) {
    return {.type = uncheckedConvertToV1_0(operation.type),
            .inputs = operation.inputs,
            .outputs = operation.outputs};
}

static V1_1::Operation convertToV1_1(const V1_0::Operation& operation) {
    return {.type = convertToV1_1(operation.type),
            .inputs = operation.inputs,
            .outputs = operation.outputs};
}

static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
        const hidl_vec<V1_1::Operation>& operations) {
    hidl_vec<V1_0::Operation> result(operations.size());
    std::transform(
            operations.begin(), operations.end(), result.begin(),
            [](const V1_1::Operation& operation) { return uncheckedConvertToV1_0(operation); });
    return result;
}

static hidl_vec<V1_1::Operation> convertToV1_1(const hidl_vec<V1_0::Operation>& operations) {
    hidl_vec<V1_1::Operation> result(operations.size());
    std::transform(operations.begin(), operations.end(), result.begin(),
                   [](const V1_0::Operation& operation) { return convertToV1_1(operation); });
    return result;
}

bool compliantWithV1_0(const V1_2::Operand& operand) {
    return validOperandType(static_cast<V1_0::OperandType>(operand.type)) &&
           (nonExtensionOperandTypeIsScalar(static_cast<int>(operand.type)) ||
            operand.dimensions.size() != 0);
}

V1_0::Model convertToV1_0(const V1_0::Model& model) {
    return model;
}

V1_0::Model convertToV1_0(const V1_1::Model& model) {
    if (!compliantWithV1_0(model)) {
        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
                   << " from V1_1::Model to V1_0::Model";
    }
    return {.operands = model.operands,
            .operations = uncheckedConvertToV1_0(model.operations),
            .inputIndexes = model.inputIndexes,
            .outputIndexes = model.outputIndexes,
            .operandValues = model.operandValues,
            .pools = model.pools};
}

V1_1::Model convertToV1_1(const V1_0::Model& model) {
    return {.operands = model.operands,
            .operations = convertToV1_1(model.operations),
            .inputIndexes = model.inputIndexes,
            .outputIndexes = model.outputIndexes,
            .operandValues = model.operandValues,
            .pools = model.pools,
            .relaxComputationFloat32toFloat16 = false};
}

V1_1::Model convertToV1_1(const V1_1::Model& model) {
    return model;
}

void logModelToInfo(const V1_2::Model& model) {
    LOG(INFO) << "V1_2::Model start";
    LOG(INFO) << "operands" << toString(model.operands);
    LOG(INFO) << "operations" << toString(model.operations);
    LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
    LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
    LOG(INFO) << "operandValues size" << model.operandValues.size();
    LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
}

static bool compliantWith(HalVersion version, const V1_2::Model& model,
                          std::set<uint32_t>* noncompliantOperations) {
    if (version >= HalVersion::V1_2) return true;

    // A boolean vector indicating whether each pool is compliant with the target HAL version.
    std::vector<bool> isPoolCompliant(model.pools.size(), false);
    std::transform(model.pools.begin(), model.pools.end(), isPoolCompliant.begin(),
                   [version](const hidl_memory& pool) { return validatePool(pool, version); });

    // A boolean vector indicating whether each operand is compliant with the target HAL version.
    std::vector<bool> isOperandCompliant(model.operands.size(), false);
    std::transform(model.operands.begin(), model.operands.end(), isOperandCompliant.begin(),
                   [&isPoolCompliant](const V1_2::Operand& op) {
                       // There is no V1_1::Operand -- both V1_0::Model and V1_1::Model use
                       // V1_0::Operand.
                       return compliantWithV1_0(op) &&
                              !(op.lifetime == OperandLifeTime::CONSTANT_REFERENCE &&
                                !isPoolCompliant[op.location.poolIndex]);
                   });

    auto allOperandsCompliant = [&isOperandCompliant](const hidl_vec<uint32_t>& indices) {
        return std::all_of(
                indices.begin(), indices.end(),
                [&isOperandCompliant](const uint32_t ind) { return isOperandCompliant[ind]; });
    };

    auto localValidateOperation = [&model, version,
                                   &allOperandsCompliant](const V1_2::Operation& op) {
        if (!allOperandsCompliant(op.inputs) || !allOperandsCompliant(op.outputs)) return false;
        int error = validateOperation(
                static_cast<int32_t>(op.type), op.inputs.size(),
                op.inputs.size() > 0 ? op.inputs.data() : nullptr, op.outputs.size(),
                op.outputs.size() > 0 ? op.outputs.data() : nullptr, model.operands, version);
        return error == ANEURALNETWORKS_NO_ERROR;
    };

    if (noncompliantOperations) {
        CHECK(noncompliantOperations->empty());
        for (uint32_t idx = 0; idx < model.operations.size(); ++idx) {
            if (!localValidateOperation(model.operations[idx])) {
                noncompliantOperations->insert(idx);
            }
        }
        return noncompliantOperations->empty();
    } else {
        return std::all_of(model.operations.begin(), model.operations.end(),
                           localValidateOperation);
    }
}

bool compliantWithV1_0(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) {
    return compliantWith(HalVersion::V1_0, model, noncompliantOperations);
}

bool compliantWithV1_1(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) {
    return compliantWith(HalVersion::V1_1, model, noncompliantOperations);
}
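
// Example (the V1_2::Model 'model' is hypothetical): collect the indexes of
// the operations that keep the model from being expressed at HAL 1.1.
//
//     std::set<uint32_t> noncompliant;
//     if (!compliantWithV1_1(model, &noncompliant)) {
//         // 'noncompliant' holds indexes into model.operations that failed
//         // validation at HalVersion::V1_1; the remaining operations could
//         // still be offered to a 1.1 driver after partitioning.
//     }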
V1_0::OperationType uncheckedConvertToV1_0(V1_2::OperationType type) {
    return static_cast<V1_0::OperationType>(type);
}

V1_1::OperationType uncheckedConvertToV1_1(V1_2::OperationType type) {
    return static_cast<V1_1::OperationType>(type);
}

static V1_2::OperationType convertToV1_2(V1_0::OperationType type) {
    return static_cast<V1_2::OperationType>(type);
}

static V1_2::OperationType convertToV1_2(V1_1::OperationType type) {
    return static_cast<V1_2::OperationType>(type);
}

static V1_0::Operation uncheckedConvertToV1_0(const V1_2::Operation& operation) {
    return {.type = uncheckedConvertToV1_0(operation.type),
            .inputs = operation.inputs,
            .outputs = operation.outputs};
}

static V1_1::Operation uncheckedConvertToV1_1(const V1_2::Operation& operation) {
    return {.type = uncheckedConvertToV1_1(operation.type),
            .inputs = operation.inputs,
            .outputs = operation.outputs};
}

static V1_2::Operation convertToV1_2(const V1_0::Operation& operation) {
    return {.type = convertToV1_2(operation.type),
            .inputs = operation.inputs,
            .outputs = operation.outputs};
}

static V1_2::Operation convertToV1_2(const V1_1::Operation& operation) {
    return {.type = convertToV1_2(operation.type),
            .inputs = operation.inputs,
            .outputs = operation.outputs};
}

static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
        const hidl_vec<V1_2::Operation>& operations) {
    hidl_vec<V1_0::Operation> result(operations.size());
    std::transform(
            operations.begin(), operations.end(), result.begin(),
            [](const V1_2::Operation& operation) { return uncheckedConvertToV1_0(operation); });
    return result;
}

static hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
        const hidl_vec<V1_2::Operation>& operations) {
    hidl_vec<V1_1::Operation> result(operations.size());
    std::transform(
            operations.begin(), operations.end(), result.begin(),
            [](const V1_2::Operation& operation) { return uncheckedConvertToV1_1(operation); });
    return result;
}

static hidl_vec<V1_2::Operation> convertToV1_2(const hidl_vec<V1_0::Operation>& operations) {
    hidl_vec<V1_2::Operation> result(operations.size());
    std::transform(operations.begin(), operations.end(), result.begin(),
                   [](const V1_0::Operation& operation) { return convertToV1_2(operation); });
    return result;
}

static hidl_vec<V1_2::Operation> convertToV1_2(const hidl_vec<V1_1::Operation>& operations) {
    hidl_vec<V1_2::Operation> result(operations.size());
    std::transform(operations.begin(), operations.end(), result.begin(),
                   [](const V1_1::Operation& operation) { return convertToV1_2(operation); });
    return result;
}
// We only need to convert from 1.0 and back since there weren't any changes to
// Operand in 1.1.
V1_2::OperandType convertToV1_2(const V1_0::OperandType& operandType) {
    return static_cast<V1_2::OperandType>(operandType);
}

static bool compliantWithV1_0(const V1_2::OperandType& operandType) {
    return validOperandType(static_cast<V1_0::OperandType>(operandType));
}

V1_0::OperandType convertToV1_0(const V1_2::OperandType& operandType) {
    if (!compliantWithV1_0(operandType)) {
        LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
                   << " from V1_2::Operand to V1_0::Operand";
    }
    return static_cast<V1_0::OperandType>(operandType);
}
// We only need to convert from 1.0 and back since there weren't any changes to
// Operand in 1.1.
V1_2::Operand convertToV1_2(const V1_0::Operand& operand) {
    return {.type = convertToV1_2(operand.type),
            .dimensions = operand.dimensions,
            .numberOfConsumers = operand.numberOfConsumers,
            .scale = operand.scale,
            .zeroPoint = operand.zeroPoint,
            .lifetime = operand.lifetime,
            .location = operand.location};
}

V1_2::Operand convertToV1_2(const V1_2::Operand& operand) {
    return operand;
}

V1_0::Operand convertToV1_0(const V1_2::Operand& operand) {
    return {.type = convertToV1_0(operand.type),
            .dimensions = operand.dimensions,
            .numberOfConsumers = operand.numberOfConsumers,
            .scale = operand.scale,
            .zeroPoint = operand.zeroPoint,
            .lifetime = operand.lifetime,
            .location = operand.location};
}
// We only need to convert from 1.0 and back since there weren't any changes to
// Operand in 1.1.
hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_0::Operand>& operands) {
    hidl_vec<V1_2::Operand> result(operands.size());
    std::transform(operands.begin(), operands.end(), result.begin(),
                   [](const V1_0::Operand& operand) { return convertToV1_2(operand); });
    return result;
}

hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_2::Operand>& operands) {
    return operands;
}

hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_2::Operand>& operands) {
    hidl_vec<V1_0::Operand> result(operands.size());
    std::transform(operands.begin(), operands.end(), result.begin(),
                   [](const V1_2::Operand& operand) { return convertToV1_0(operand); });
    return result;
}

V1_0::Model convertToV1_0(const V1_2::Model& model) {
    if (!compliantWithV1_0(model)) {
        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
                   << " from V1_2::Model to V1_0::Model";
    }
    return {.operands = convertToV1_0(model.operands),
            .operations = uncheckedConvertToV1_0(model.operations),
            .inputIndexes = model.inputIndexes,
            .outputIndexes = model.outputIndexes,
            .operandValues = model.operandValues,
            .pools = model.pools};
}

V1_1::Model convertToV1_1(const V1_2::Model& model) {
    if (!compliantWithV1_1(model)) {
        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
                   << " from V1_2::Model to V1_1::Model";
    }
    return {.operands = convertToV1_0(model.operands),  // Operands in 1.1 and 1.0 are identical.
            .operations = uncheckedConvertToV1_1(model.operations),
            .inputIndexes = model.inputIndexes,
            .outputIndexes = model.outputIndexes,
            .operandValues = model.operandValues,
            .pools = model.pools,
            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
}
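
// Example guard (hypothetical 'model'): downgrade a V1_2::Model for a HAL 1.1
// driver only when it is compliant; convertToV1_1 logs an error otherwise.
//
//     if (compliantWithV1_1(model, nullptr)) {
//         V1_1::Model model11 = convertToV1_1(model);
//         // hand model11 to a HAL 1.1 driver
//     }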
V1_2::Model convertToV1_2(const V1_0::Model& model) {
    return {.operands = convertToV1_2(model.operands),
            .operations = convertToV1_2(model.operations),
            .inputIndexes = model.inputIndexes,
            .outputIndexes = model.outputIndexes,
            .operandValues = model.operandValues,
            .pools = model.pools,
            .relaxComputationFloat32toFloat16 = false};
}

V1_2::Model convertToV1_2(const V1_1::Model& model) {
    return {.operands = convertToV1_2(model.operands),
            .operations = convertToV1_2(model.operations),
            .inputIndexes = model.inputIndexes,
            .outputIndexes = model.outputIndexes,
            .operandValues = model.operandValues,
            .pools = model.pools,
            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
}

V1_2::Model convertToV1_2(const V1_2::Model& model) {
    return model;
}

#ifdef NN_DEBUGGABLE
uint32_t getProp(const char* str, uint32_t defaultValue) {
    const std::string propStr = android::base::GetProperty(str, "");
    if (propStr.size() > 0) {
        return std::stoi(propStr);
    } else {
        return defaultValue;
    }
}
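
// Example (debuggable builds only; the property name is illustrative):
//
//     uint32_t value = getProp("debug.nn.some-knob", 0);
//     // returns 0 unless the system property is set to a numeric string.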
#endif  // NN_DEBUGGABLE

}  // namespace nn
}  // namespace android