/* libs/pixelflinger/codeflinger/MIPS64Assembler.cpp
**
** Copyright 2015, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

/* MIPS64 assembler and ARM->MIPS64 assembly translator
**
** The approach is to utilize the MIPSAssembler generator, using an inherited
** MIPS64Assembler that overrides just the specific MIPS64r6 instructions.
** For now, ArmToMips64Assembler is copied over from the ArmToMipsAssembler
** class, changing some MIPS64r6-related details.
*/

#define LOG_TAG "MIPS64Assembler"

#include <stdio.h>
#include <stdlib.h>

#include <cutils/properties.h>
#include <log/log.h>
#include <private/pixelflinger/ggl_context.h>

#include "MIPS64Assembler.h"
#include "CodeCache.h"
#include "mips64_disassem.h"

#define NOT_IMPLEMENTED()  LOG_ALWAYS_FATAL("Arm instruction %s not yet implemented\n", __func__)

#define __unused __attribute__((__unused__))

// ----------------------------------------------------------------------------

namespace android {

// ----------------------------------------------------------------------------

#if 0
#pragma mark -
#pragma mark ArmToMips64Assembler...
#endif
ArmToMips64Assembler::ArmToMips64Assembler(const sp<Assembly>& assembly,
                                           char *abuf, int linesz, int instr_count)
    :   ARMAssemblerInterface(),
        mArmDisassemblyBuffer(abuf),
        mArmLineLength(linesz),
        mArmInstrCount(instr_count),
        mInum(0),
        mAssembly(assembly)
{
    mMips = new MIPS64Assembler(assembly, this);
    mArmPC = (uint32_t **) malloc(ARM_MAX_INSTUCTIONS * sizeof(uint32_t *));
    init_conditional_labels();
}

ArmToMips64Assembler::ArmToMips64Assembler(void* assembly)
    :   ARMAssemblerInterface(),
        mArmDisassemblyBuffer(NULL),
        mInum(0),
        mAssembly(NULL)
{
    mMips = new MIPS64Assembler(assembly, this);
    mArmPC = (uint32_t **) malloc(ARM_MAX_INSTUCTIONS * sizeof(uint32_t *));
    init_conditional_labels();
}

ArmToMips64Assembler::~ArmToMips64Assembler()
{
    delete mMips;
    free((void *) mArmPC);
}

uint32_t* ArmToMips64Assembler::pc() const
{
    return mMips->pc();
}

uint32_t* ArmToMips64Assembler::base() const
{
    return mMips->base();
}

void ArmToMips64Assembler::reset()
{
    cond.labelnum = 0;
    mInum = 0;
    mMips->reset();
}

int ArmToMips64Assembler::getCodegenArch()
{
    return CODEGEN_ARCH_MIPS64;
}

void ArmToMips64Assembler::comment(const char* string)
{
    mMips->comment(string);
}

void ArmToMips64Assembler::label(const char* theLabel)
{
    mMips->label(theLabel);
}

void ArmToMips64Assembler::disassemble(const char* name)
{
    mMips->disassemble(name);
}

void ArmToMips64Assembler::init_conditional_labels()
{
    int i;
    for (i = 0; i < 99; ++i) {
        sprintf(cond.label[i], "cond_%d", i);
    }
}
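// NOTE: the names generated above ("cond_0" .. "cond_98") assume cond.label
// has room for at least 99 entries; cond.labelnum is reset in reset() and
// pre-incremented in dataProcessing() before each use.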
#if 0
#pragma mark -
#pragma mark Prolog/Epilog & Generate...
#endif

void ArmToMips64Assembler::prolog()
{
    mArmPC[mInum++] = pc();  // save starting PC for this instr
    mMips->DADDIU(R_sp, R_sp, -(5 * 8));
    mMips->SD(R_s0, R_sp, 0);
    mMips->SD(R_s1, R_sp, 8);
    mMips->SD(R_s2, R_sp, 16);
    mMips->SD(R_s3, R_sp, 24);
    mMips->SD(R_s4, R_sp, 32);
    mMips->MOVE(R_v0, R_a0);  // move context * passed in a0 to v0 (arm r0)
}
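// The prolog above builds a 40-byte frame holding the five callee-saved
// registers the generated code may clobber (a sketch; offsets match the
// SD/LD pairs in prolog/epilog):
//
//     sp+32 : s4
//     sp+24 : s3
//     sp+16 : s2
//     sp+8  : s1
//     sp+0  : s0   <-- sp after DADDIU(R_sp, R_sp, -40)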
void ArmToMips64Assembler::epilog(uint32_t touched __unused)
{
    mArmPC[mInum++] = pc();  // save starting PC for this instr
    mMips->LD(R_s0, R_sp, 0);
    mMips->LD(R_s1, R_sp, 8);
    mMips->LD(R_s2, R_sp, 16);
    mMips->LD(R_s3, R_sp, 24);
    mMips->LD(R_s4, R_sp, 32);
    mMips->DADDIU(R_sp, R_sp, (5 * 8));
    mMips->JR(R_ra);
}

int ArmToMips64Assembler::generate(const char* name)
{
    return mMips->generate(name);
}

void ArmToMips64Assembler::fix_branches()
{
    mMips->fix_branches();
}

uint32_t* ArmToMips64Assembler::pcForLabel(const char* label)
{
    return mMips->pcForLabel(label);
}

void ArmToMips64Assembler::set_condition(int mode, int R1, int R2) {
    if (mode == 2) {
        cond.type = SBIT_COND;
    } else {
        cond.type = CMP_COND;
    }
    cond.r1 = R1;
    cond.r2 = R2;
}
//----------------------------------------------------------

#if 0
#pragma mark -
#pragma mark Addressing modes & shifters...
#endif

// do not need this for MIPS, but it is in the Interface (virtual)
int ArmToMips64Assembler::buildImmediate(
        uint32_t immediate, uint32_t& rot, uint32_t& imm)
{
    // for MIPS, any 32-bit immediate is OK
    rot = 0;
    imm = immediate;
    return 0;
}

// shifters...

bool ArmToMips64Assembler::isValidImmediate(uint32_t immediate __unused)
{
    // for MIPS, any 32-bit immediate is OK
    return true;
}

uint32_t ArmToMips64Assembler::imm(uint32_t immediate)
{
    amode.value = immediate;
    return AMODE_IMM;
}

uint32_t ArmToMips64Assembler::reg_imm(int Rm, int type, uint32_t shift)
{
    amode.reg = Rm;
    amode.stype = type;
    amode.value = shift;
    return AMODE_REG_IMM;
}

uint32_t ArmToMips64Assembler::reg_rrx(int Rm __unused)
{
    // reg_rrx mode is not used in the GGLAssembler code at this time
    return AMODE_UNSUPPORTED;
}

uint32_t ArmToMips64Assembler::reg_reg(int Rm __unused, int type __unused,
                                       int Rs __unused)
{
    // reg_reg mode is not used in the GGLAssembler code at this time
    return AMODE_UNSUPPORTED;
}

// addressing modes...
// LDR(B)/STR(B)/PLD (immediate and Rm can be negative, which indicates U=0)

uint32_t ArmToMips64Assembler::immed12_pre(int32_t immed12, int W)
{
    LOG_ALWAYS_FATAL_IF(abs(immed12) >= 0x800,
                        "LDR(B)/STR(B)/PLD immediate too big (%08x)", immed12);
    amode.value = immed12;
    amode.writeback = W;
    return AMODE_IMM_12_PRE;
}

uint32_t ArmToMips64Assembler::immed12_post(int32_t immed12)
{
    LOG_ALWAYS_FATAL_IF(abs(immed12) >= 0x800,
                        "LDR(B)/STR(B)/PLD immediate too big (%08x)", immed12);
    amode.value = immed12;
    return AMODE_IMM_12_POST;
}

uint32_t ArmToMips64Assembler::reg_scale_pre(int Rm, int type,
                                             uint32_t shift, int W)
{
    LOG_ALWAYS_FATAL_IF(W | type | shift, "reg_scale_pre adv modes not yet implemented");
    amode.reg = Rm;
    // amode.stype = type;      // more advanced modes not used in GGLAssembler yet
    // amode.value = shift;
    // amode.writeback = W;
    return AMODE_REG_SCALE_PRE;
}

uint32_t ArmToMips64Assembler::reg_scale_post(int Rm __unused, int type __unused,
                                              uint32_t shift __unused)
{
    LOG_ALWAYS_FATAL("adr mode reg_scale_post not yet implemented\n");
    return AMODE_UNSUPPORTED;
}

// LDRH/LDRSB/LDRSH/STRH (immediate and Rm can be negative, which indicates U=0)
uint32_t ArmToMips64Assembler::immed8_pre(int32_t immed8, int W __unused)
{
    LOG_ALWAYS_FATAL("adr mode immed8_pre not yet implemented\n");
    LOG_ALWAYS_FATAL_IF(abs(immed8) >= 0x100,
                        "LDRH/LDRSB/LDRSH/STRH immediate too big (%08x)", immed8);
    return AMODE_IMM_8_PRE;
}

uint32_t ArmToMips64Assembler::immed8_post(int32_t immed8)
{
    LOG_ALWAYS_FATAL_IF(abs(immed8) >= 0x100,
                        "LDRH/LDRSB/LDRSH/STRH immediate too big (%08x)", immed8);
    amode.value = immed8;
    return AMODE_IMM_8_POST;
}

uint32_t ArmToMips64Assembler::reg_pre(int Rm, int W)
{
    LOG_ALWAYS_FATAL_IF(W, "reg_pre writeback not yet implemented");
    amode.reg = Rm;
    return AMODE_REG_PRE;
}

uint32_t ArmToMips64Assembler::reg_post(int Rm __unused)
{
    LOG_ALWAYS_FATAL("adr mode reg_post not yet implemented\n");
    return AMODE_UNSUPPORTED;
}
// ----------------------------------------------------------------------------

#if 0
#pragma mark -
#pragma mark Data Processing...
#endif

// check if the operand registers from a previous CMP or S-bit instruction
// would be overwritten by this instruction. If so, move the value to a
// safe register.
// Note that we cannot tell at _this_ instruction time if a future (conditional)
// instruction will _also_ use this value (a defect of the simple 1-pass, one-
// instruction-at-a-time translation). Therefore we must be conservative and
// save the value before it is overwritten. This costs an extra MOVE instr.

void ArmToMips64Assembler::protectConditionalOperands(int Rd)
{
    if (Rd == cond.r1) {
        mMips->MOVE(R_cmp, cond.r1);
        cond.r1 = R_cmp;
    }
    if (cond.type == CMP_COND && Rd == cond.r2) {
        mMips->MOVE(R_cmp2, cond.r2);
        cond.r2 = R_cmp2;
    }
}
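// For example (a sketch of the case this guards against), the ARM sequence
//     CMP   r0, r1
//     MOVGT r0, r2      <- overwrites a CMP operand
//     MOVLE r0, r3      <- second conditional still needs the original r0
// re-tests the CMP operands for every conditional instruction, so before the
// MOVGT clobbers r0 we copy it into R_cmp and redirect cond.r1 there.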
// interprets the addressing mode, and generates the common code
// used by the majority of data-processing ops. Many MIPS instructions
// have a register-based form and a different immediate form. See
// opAND below for an example. (this could be inlined)
//
// this works with the imm(), reg_imm() methods above, which are directly
// called by the GGLAssembler.
// note: _signed parameter defaults to false (un-signed)
// note: tmpReg parameter defaults to 1, MIPS register AT
int ArmToMips64Assembler::dataProcAdrModes(int op, int& source, bool _signed, int tmpReg)
{
    if (op < AMODE_REG) {
        source = op;
        return SRC_REG;
    } else if (op == AMODE_IMM) {
        if ((!_signed && amode.value > 0xffff)
                || (_signed && ((int)amode.value < -32768 || (int)amode.value > 32767))) {
            mMips->LUI(tmpReg, (amode.value >> 16));
            if (amode.value & 0x0000ffff) {
                mMips->ORI(tmpReg, tmpReg, (amode.value & 0x0000ffff));
            }
            source = tmpReg;
            return SRC_REG;
        } else {
            source = amode.value;
            return SRC_IMM;
        }
    } else if (op == AMODE_REG_IMM) {
        switch (amode.stype) {
            case LSL: mMips->SLL(tmpReg, amode.reg, amode.value); break;
            case LSR: mMips->SRL(tmpReg, amode.reg, amode.value); break;
            case ASR: mMips->SRA(tmpReg, amode.reg, amode.value); break;
            case ROR: mMips->ROTR(tmpReg, amode.reg, amode.value); break;
        }
        source = tmpReg;
        return SRC_REG;
    } else {  // adr mode RRX is not used in GGL Assembler at this time
        // we are screwed, this should be exception, assert-fail or something
        LOG_ALWAYS_FATAL("adr mode reg_rrx not yet implemented\n");
        return SRC_ERROR;
    }
}
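// Immediate-synthesis example (illustrative values): an immediate that does
// not fit in 16 bits, say 0x12348765, is materialized into tmpReg as
//     LUI  tmpReg, 0x1234          # tmpReg = 0x12340000
//     ORI  tmpReg, tmpReg, 0x8765  # tmpReg = 0x12348765
// and the caller then uses the op's register form; a 16-bit immediate is
// passed through unchanged so the op can use its immediate form.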
void ArmToMips64Assembler::dataProcessing(int opcode, int cc,
                                          int s, int Rd, int Rn, uint32_t Op2)
{
    int src;  // src is modified by dataProcAdrModes() - passed as int&

    if (cc != AL) {
        protectConditionalOperands(Rd);
        // the branch tests register(s) set by prev CMP or instr with 'S' bit set
        // invert the condition to jump past this conditional instruction
        ArmToMips64Assembler::B(cc^1, cond.label[++cond.labelnum]);
    } else {
        mArmPC[mInum++] = pc();  // save starting PC for this instr
    }

    switch (opcode) {
    case opAND:
        if (dataProcAdrModes(Op2, src) == SRC_REG) {
            mMips->AND(Rd, Rn, src);
        } else {  // adr mode was SRC_IMM
            mMips->ANDI(Rd, Rn, src);
        }
        break;

    case opADD:
        // set "signed" to true for adr modes
        if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
            mMips->ADDU(Rd, Rn, src);
        } else {  // adr mode was SRC_IMM
            mMips->ADDIU(Rd, Rn, src);
        }
        break;

    case opSUB:
        // set "signed" to true for adr modes
        if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
            mMips->SUBU(Rd, Rn, src);
        } else {  // adr mode was SRC_IMM
            mMips->SUBIU(Rd, Rn, src);
        }
        break;

    case opADD64:
        // set "signed" to true for adr modes
        if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
            mMips->DADDU(Rd, Rn, src);
        } else {  // adr mode was SRC_IMM
            mMips->DADDIU(Rd, Rn, src);
        }
        break;

    case opSUB64:
        // set "signed" to true for adr modes
        if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
            mMips->DSUBU(Rd, Rn, src);
        } else {  // adr mode was SRC_IMM
            mMips->DSUBIU(Rd, Rn, src);
        }
        break;

    case opEOR:
        if (dataProcAdrModes(Op2, src) == SRC_REG) {
            mMips->XOR(Rd, Rn, src);
        } else {  // adr mode was SRC_IMM
            mMips->XORI(Rd, Rn, src);
        }
        break;

    case opORR:
        if (dataProcAdrModes(Op2, src) == SRC_REG) {
            mMips->OR(Rd, Rn, src);
        } else {  // adr mode was SRC_IMM
            mMips->ORI(Rd, Rn, src);
        }
        break;

    case opBIC:
        if (dataProcAdrModes(Op2, src) == SRC_IMM) {
            // if we have a 16-bit immediate, load it into the AT reg
            mMips->ORI(R_at, 0, src);
            src = R_at;
        }
        mMips->NOT(R_at, src);
        mMips->AND(Rd, Rn, R_at);
        break;

    case opRSB:
        if (dataProcAdrModes(Op2, src) == SRC_IMM) {
            // if we have a 16-bit immediate, load it into the AT reg
            mMips->ORI(R_at, 0, src);
            src = R_at;
        }
        mMips->SUBU(Rd, src, Rn);  // subu with the parameters reversed
        break;

    case opMOV:
        if (Op2 < AMODE_REG) {  // op2 is reg # in this case
            mMips->MOVE(Rd, Op2);
        } else if (Op2 == AMODE_IMM) {
            if (amode.value > 0xffff) {
                mMips->LUI(Rd, (amode.value >> 16));
                if (amode.value & 0x0000ffff) {
                    mMips->ORI(Rd, Rd, (amode.value & 0x0000ffff));
                }
            } else {
                mMips->ORI(Rd, 0, amode.value);
            }
        } else if (Op2 == AMODE_REG_IMM) {
            switch (amode.stype) {
                case LSL: mMips->SLL(Rd, amode.reg, amode.value); break;
                case LSR: mMips->SRL(Rd, amode.reg, amode.value); break;
                case ASR: mMips->SRA(Rd, amode.reg, amode.value); break;
                case ROR: mMips->ROTR(Rd, amode.reg, amode.value); break;
            }
        } else {
            // adr mode RRX is not used in GGL Assembler at this time
            mMips->UNIMPL();
        }
        break;

    case opMVN:  // this is a 1's complement: NOT
        if (Op2 < AMODE_REG) {  // op2 is reg # in this case
            mMips->NOR(Rd, Op2, 0);  // NOT is NOR with 0
            break;
        } else if (Op2 == AMODE_IMM) {
            if (amode.value > 0xffff) {
                mMips->LUI(Rd, (amode.value >> 16));
                if (amode.value & 0x0000ffff) {
                    mMips->ORI(Rd, Rd, (amode.value & 0x0000ffff));
                }
            } else {
                mMips->ORI(Rd, 0, amode.value);
            }
        } else if (Op2 == AMODE_REG_IMM) {
            switch (amode.stype) {
                case LSL: mMips->SLL(Rd, amode.reg, amode.value); break;
                case LSR: mMips->SRL(Rd, amode.reg, amode.value); break;
                case ASR: mMips->SRA(Rd, amode.reg, amode.value); break;
                case ROR: mMips->ROTR(Rd, amode.reg, amode.value); break;
            }
        } else {
            // adr mode RRX is not used in GGL Assembler at this time
            mMips->UNIMPL();
        }
        mMips->NOR(Rd, Rd, 0);  // NOT is NOR with 0
        break;

    case opCMP:
        // Either operand of a CMP instr could get overwritten by a subsequent
        // conditional instruction, which is ok, _UNLESS_ there is a _second_
        // conditional instruction. Under MIPS, this requires doing the comparison
        // again (SLT), and the original operands must be available. (and this
        // pattern of multiple conditional instructions from same CMP _is_ used
        // in GGL-Assembler)
        //
        // For now, if a conditional instr overwrites the operands, we will
        // move them to dedicated temp regs. This is ugly, and inefficient,
        // and should be optimized.
        //
        // WARNING: making an _Assumption_ that CMP operand regs will NOT be
        // trashed by intervening NON-conditional instructions. In the general
        // case this is legal, but it is NOT currently done in GGL-Assembler.
        cond.type = CMP_COND;
        cond.r1 = Rn;
        if (dataProcAdrModes(Op2, src, false, R_cmp2) == SRC_REG) {
            cond.r2 = src;
        } else {  // adr mode was SRC_IMM
            mMips->ORI(R_cmp2, R_zero, src);
            cond.r2 = R_cmp2;
        }
        break;

    case opTST:
    case opTEQ:
    case opCMN:
    case opADC:
    case opSBC:
    case opRSC:
        mMips->UNIMPL();  // currently unused in GGL Assembler code
        break;
    }

    if (cc != AL) {
        mMips->label(cond.label[cond.labelnum]);
    }
    if (s && opcode != opCMP) {
        cond.type = SBIT_COND;
        cond.r1 = Rd;
    }
}
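// Conditional-execution sketch: ARM predication has no MIPS equivalent, so a
// conditional data-processing op is translated by branching around it on the
// inverted condition. Illustratively, "ADDGT r0, r1, r2" becomes roughly:
//     B(LE, "cond_1")        # inverted condition skips the op
//     ADDU  r0, r1, r2
//   cond_1:
// The cc^1 trick works because ARM condition codes are encoded so that each
// condition and its inverse differ only in the low bit.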
#if 0
#pragma mark -
#pragma mark Multiply...
#endif

// multiply, accumulate
void ArmToMips64Assembler::MLA(int cc __unused, int s,
                               int Rd, int Rm, int Rs, int Rn) {
    //ALOGW("MLA");
    mArmPC[mInum++] = pc();  // save starting PC for this instr
    mMips->MUL(R_at, Rm, Rs);
    mMips->ADDU(Rd, R_at, Rn);
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = Rd;
    }
}

void ArmToMips64Assembler::MUL(int cc __unused, int s,
                               int Rd, int Rm, int Rs) {
    mArmPC[mInum++] = pc();
    mMips->MUL(Rd, Rm, Rs);
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = Rd;
    }
}

void ArmToMips64Assembler::UMULL(int cc __unused, int s,
                                 int RdLo, int RdHi, int Rm, int Rs) {
    mArmPC[mInum++] = pc();
    mMips->MUH(RdHi, Rm, Rs);
    mMips->MUL(RdLo, Rm, Rs);

    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = RdHi;  // BUG...
        LOG_ALWAYS_FATAL("Condition on UMULL must be on 64-bit result\n");
    }
}

void ArmToMips64Assembler::UMUAL(int cc __unused, int s,
                                 int RdLo __unused, int RdHi, int Rm __unused, int Rs __unused) {
    LOG_FATAL_IF(RdLo==Rm || RdHi==Rm || RdLo==RdHi,
                 "UMUAL(r%u,r%u,r%u,r%u)", RdLo,RdHi,Rm,Rs);
    // *mPC++ = (cc<<28) | (1<<23) | (1<<21) | (s<<20) |
    //          (RdHi<<16) | (RdLo<<12) | (Rs<<8) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = RdHi;  // BUG...
        LOG_ALWAYS_FATAL("Condition on UMUAL must be on 64-bit result\n");
    }
}

void ArmToMips64Assembler::SMULL(int cc __unused, int s,
                                 int RdLo __unused, int RdHi, int Rm __unused, int Rs __unused) {
    LOG_FATAL_IF(RdLo==Rm || RdHi==Rm || RdLo==RdHi,
                 "SMULL(r%u,r%u,r%u,r%u)", RdLo,RdHi,Rm,Rs);
    // *mPC++ = (cc<<28) | (1<<23) | (1<<22) | (s<<20) |
    //          (RdHi<<16) | (RdLo<<12) | (Rs<<8) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = RdHi;  // BUG...
        LOG_ALWAYS_FATAL("Condition on SMULL must be on 64-bit result\n");
    }
}

void ArmToMips64Assembler::SMUAL(int cc __unused, int s,
                                 int RdLo __unused, int RdHi, int Rm __unused, int Rs __unused) {
    LOG_FATAL_IF(RdLo==Rm || RdHi==Rm || RdLo==RdHi,
                 "SMUAL(r%u,r%u,r%u,r%u)", RdLo,RdHi,Rm,Rs);
    // *mPC++ = (cc<<28) | (1<<23) | (1<<22) | (1<<21) | (s<<20) |
    //          (RdHi<<16) | (RdLo<<12) | (Rs<<8) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = RdHi;  // BUG...
        LOG_ALWAYS_FATAL("Condition on SMUAL must be on 64-bit result\n");
    }
}
#if 0
#pragma mark -
#pragma mark Branches...
#endif

// branches...

void ArmToMips64Assembler::B(int cc, const char* label)
{
    mArmPC[mInum++] = pc();
    if (cond.type == SBIT_COND) { cond.r2 = R_zero; }

    switch (cc) {
        case EQ: mMips->BEQ(cond.r1, cond.r2, label); break;
        case NE: mMips->BNE(cond.r1, cond.r2, label); break;
        case HS: mMips->BGEU(cond.r1, cond.r2, label); break;
        case LO: mMips->BLTU(cond.r1, cond.r2, label); break;
        case MI: mMips->BLT(cond.r1, cond.r2, label); break;
        case PL: mMips->BGE(cond.r1, cond.r2, label); break;
        case HI: mMips->BGTU(cond.r1, cond.r2, label); break;
        case LS: mMips->BLEU(cond.r1, cond.r2, label); break;
        case GE: mMips->BGE(cond.r1, cond.r2, label); break;
        case LT: mMips->BLT(cond.r1, cond.r2, label); break;
        case GT: mMips->BGT(cond.r1, cond.r2, label); break;
        case LE: mMips->BLE(cond.r1, cond.r2, label); break;
        case AL: mMips->B(label); break;
        case NV: /* B Never - no instruction */ break;
        case VS:
        case VC:
        default:
            LOG_ALWAYS_FATAL("Unsupported cc: %02x\n", cc);
            break;
    }
}
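// Since MIPS has no condition-flags register, each ARM condition is re-derived
// here by comparing the recorded operands directly (compare-and-branch). For
// S-bit conditions the result register is simply compared against zero
// (R_zero above). VS/VC (overflow) have no direct compare-and-branch
// equivalent here and are rejected outright.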
void ArmToMips64Assembler::BL(int cc __unused, const char* label __unused)
{
    LOG_ALWAYS_FATAL("branch-and-link not supported yet\n");
    mArmPC[mInum++] = pc();
}

// no use for Branches with integer PC, but they're in the Interface class ....
void ArmToMips64Assembler::B(int cc __unused, uint32_t* to_pc __unused)
{
    LOG_ALWAYS_FATAL("branch to absolute PC not supported, use Label\n");
    mArmPC[mInum++] = pc();
}

void ArmToMips64Assembler::BL(int cc __unused, uint32_t* to_pc __unused)
{
    LOG_ALWAYS_FATAL("branch to absolute PC not supported, use Label\n");
    mArmPC[mInum++] = pc();
}

void ArmToMips64Assembler::BX(int cc __unused, int Rn __unused)
{
    LOG_ALWAYS_FATAL("branch to absolute PC not supported, use Label\n");
    mArmPC[mInum++] = pc();
}

#if 0
#pragma mark -
#pragma mark Data Transfer...
#endif
// data transfer...
void ArmToMips64Assembler::LDR(int cc __unused, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;  // convert LDR via Arm SP to LW via Mips SP
            }
            mMips->LW(Rd, Rn, amode.value);
            if (amode.writeback) {  // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;  // convert LDR via Arm SP to LW via Mips SP
            }
            mMips->LW(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->LW(Rd, R_at, 0);
            break;
    }
}
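// Addressing-mode dispatch (a sketch): GGLAssembler first calls an
// addressing-mode helper such as immed12_pre(), which stashes its operands in
// 'amode' and returns a small mode token; that token is what arrives here as
// 'offset'. An illustrative call sequence:
//     LDR(AL, r0, r1, immed12_pre(4));   // emits LW r0, 4(r1)
// Anything outside the token range is treated as the ARM default mode,
// immed12_pre(0) with no writeback (the 'case 0' fall-through above).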
void ArmToMips64Assembler::LDRB(int cc __unused, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            mMips->LBU(Rd, Rn, amode.value);
            if (amode.writeback) {  // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            mMips->LBU(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->LBU(Rd, R_at, 0);
            break;
    }
}

void ArmToMips64Assembler::STR(int cc __unused, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;  // convert STR thru Arm SP to SW thru Mips SP
            }
            if (amode.writeback) {  // OPTIONAL writeback on pre-index mode
                // If we will writeback, then update the index reg, then store.
                // This correctly handles the stack-push case.
                mMips->DADDIU(Rn, Rn, amode.value);
                mMips->SW(Rd, Rn, 0);
            } else {
                // No writeback, so store offset by value
                mMips->SW(Rd, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            mMips->SW(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);  // post-index always writes back
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->SW(Rd, R_at, 0);
            break;
    }
}

void ArmToMips64Assembler::STRB(int cc __unused, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            mMips->SB(Rd, Rn, amode.value);
            if (amode.writeback) {  // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            mMips->SB(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->SB(Rd, R_at, 0);
            break;
    }
}
void ArmToMips64Assembler::LDRH(int cc __unused, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed8_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            // fall thru to next case ....
        case AMODE_IMM_8_PRE:  // no support yet for writeback
            mMips->LHU(Rd, Rn, amode.value);
            break;
        case AMODE_IMM_8_POST:
            mMips->LHU(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_PRE:
            // we only support simple base +/- index
            if (amode.reg >= 0) {
                mMips->DADDU(R_at, Rn, amode.reg);
            } else {
                mMips->DSUBU(R_at, Rn, abs(amode.reg));
            }
            mMips->LHU(Rd, R_at, 0);
            break;
    }
}

void ArmToMips64Assembler::LDRSB(int cc __unused, int Rd __unused,
                                 int Rn __unused, uint32_t offset __unused)
{
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::LDRSH(int cc __unused, int Rd __unused,
                                 int Rn __unused, uint32_t offset __unused)
{
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::STRH(int cc __unused, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed8_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            // fall thru to next case ....
        case AMODE_IMM_8_PRE:  // no support yet for writeback
            mMips->SH(Rd, Rn, amode.value);
            break;
        case AMODE_IMM_8_POST:
            mMips->SH(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_PRE:
            // we only support simple base +/- index
            if (amode.reg >= 0) {
                mMips->DADDU(R_at, Rn, amode.reg);
            } else {
                mMips->DSUBU(R_at, Rn, abs(amode.reg));
            }
            mMips->SH(Rd, R_at, 0);
            break;
    }
}
#if 0
#pragma mark -
#pragma mark Block Data Transfer...
#endif

// block data transfer...
void ArmToMips64Assembler::LDM(int cc __unused, int dir __unused,
                               int Rn __unused, int W __unused, uint32_t reg_list __unused)
{   //                        ED FD EA FA   IB IA DB DA
    // const uint8_t P[8] = { 1, 0, 1, 0,   1, 0, 1, 0 };
    // const uint8_t U[8] = { 1, 1, 0, 0,   1, 1, 0, 0 };
    // *mPC++ = (cc<<28) | (4<<25) | (uint32_t(P[dir])<<24) |
    //          (uint32_t(U[dir])<<23) | (1<<20) | (W<<21) | (Rn<<16) | reg_list;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::STM(int cc __unused, int dir __unused,
                               int Rn __unused, int W __unused, uint32_t reg_list __unused)
{   //                        FA EA FD ED   IB IA DB DA
    // const uint8_t P[8] = { 0, 1, 0, 1,   1, 0, 1, 0 };
    // const uint8_t U[8] = { 0, 0, 1, 1,   1, 1, 0, 0 };
    // *mPC++ = (cc<<28) | (4<<25) | (uint32_t(P[dir])<<24) |
    //          (uint32_t(U[dir])<<23) | (0<<20) | (W<<21) | (Rn<<16) | reg_list;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

#if 0
#pragma mark -
#pragma mark Special...
#endif

// special...
void ArmToMips64Assembler::SWP(int cc __unused, int Rn __unused,
                               int Rd __unused, int Rm __unused) {
    // *mPC++ = (cc<<28) | (2<<23) | (Rn<<16) | (Rd<<12) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::SWPB(int cc __unused, int Rn __unused,
                                int Rd __unused, int Rm __unused) {
    // *mPC++ = (cc<<28) | (2<<23) | (1<<22) | (Rn<<16) | (Rd<<12) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::SWI(int cc __unused, uint32_t comment __unused) {
    // *mPC++ = (cc<<28) | (0xF<<24) | comment;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
#if 0
#pragma mark -
#pragma mark DSP instructions...
#endif

// DSP instructions...
void ArmToMips64Assembler::PLD(int Rn __unused, uint32_t offset) {
    LOG_ALWAYS_FATAL_IF(!((offset&(1<<24)) && !(offset&(1<<21))),
                        "PLD only P=1, W=0");
    // *mPC++ = 0xF550F000 | (Rn<<16) | offset;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::CLZ(int cc __unused, int Rd, int Rm)
{
    mArmPC[mInum++] = pc();
    mMips->CLZ(Rd, Rm);
}

void ArmToMips64Assembler::QADD(int cc __unused, int Rd __unused,
                                int Rm __unused, int Rn __unused)
{
    // *mPC++ = (cc<<28) | 0x1000050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::QDADD(int cc __unused, int Rd __unused,
                                 int Rm __unused, int Rn __unused)
{
    // *mPC++ = (cc<<28) | 0x1400050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::QSUB(int cc __unused, int Rd __unused,
                                int Rm __unused, int Rn __unused)
{
    // *mPC++ = (cc<<28) | 0x1200050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::QDSUB(int cc __unused, int Rd __unused,
                                 int Rm __unused, int Rn __unused)
{
    // *mPC++ = (cc<<28) | 0x1600050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
// 16 x 16 signed multiply (like SMLAxx without the accumulate)
void ArmToMips64Assembler::SMUL(int cc __unused, int xy,
                                int Rd, int Rm, int Rs)
{
    mArmPC[mInum++] = pc();

    // the 16 bits may be in the top or bottom half of the 32-bit source reg,
    // as defined by the codes BB, BT, TB, TT (compressed param xy),
    // where x corresponds to Rm and y to Rs

    // select half-reg for Rm
    if (xy & xyTB) {
        // use top 16 bits
        mMips->SRA(R_at, Rm, 16);
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at, Rm);
    }
    // select half-reg for Rs
    if (xy & xyBT) {
        // use top 16 bits
        mMips->SRA(R_at2, Rs, 16);
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at2, Rs);
    }

    mMips->MUL(Rd, R_at, R_at2);
}
// signed 32b x 16b multiply, save top 32 bits of the 48-bit result
void ArmToMips64Assembler::SMULW(int cc __unused, int y,
                                 int Rd, int Rm, int Rs)
{
    mArmPC[mInum++] = pc();

    // the selector yT or yB refers to reg Rs
    if (y & yT) {
        // zero the bottom 16 bits with two shifts; stray low bits would
        // affect the result
        mMips->SRL(R_at, Rs, 16);
        mMips->SLL(R_at, R_at, 16);
    } else {
        // move the low 16-bit half to the high half
        mMips->SLL(R_at, Rs, 16);
    }
    mMips->MUH(Rd, Rm, R_at);
}
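// Why MUH works here (a sketch): the selected 16-bit half of Rs ends up in
// the upper half of R_at, so R_at = Rs16 * 2^16. Then
//     MUH(Rd, Rm, R_at) = (Rm * Rs16 * 2^16) >> 32 = (Rm * Rs16) >> 16,
// which is exactly the top 32 bits of the 48-bit SMULW product.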
// 16 x 16 signed multiply, accumulate: Rd = Rm{16} * Rs{16} + Rn
void ArmToMips64Assembler::SMLA(int cc __unused, int xy,
                                int Rd, int Rm, int Rs, int Rn)
{
    mArmPC[mInum++] = pc();

    // the 16 bits may be in the top or bottom half of the 32-bit source reg,
    // as defined by the codes BB, BT, TB, TT (compressed param xy),
    // where x corresponds to Rm and y to Rs

    // select half-reg for Rm
    if (xy & xyTB) {
        // use top 16 bits
        mMips->SRA(R_at, Rm, 16);
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at, Rm);
    }
    // select half-reg for Rs
    if (xy & xyBT) {
        // use top 16 bits
        mMips->SRA(R_at2, Rs, 16);
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at2, Rs);
    }

    mMips->MUL(R_at, R_at, R_at2);
    mMips->ADDU(Rd, R_at, Rn);
}
void ArmToMips64Assembler::SMLAL(int cc __unused, int xy __unused,
                                 int RdHi __unused, int RdLo __unused,
                                 int Rs __unused, int Rm __unused)
{
    // *mPC++ = (cc<<28) | 0x1400080 | (RdHi<<16) | (RdLo<<12) | (Rs<<8) | (xy<<4) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

void ArmToMips64Assembler::SMLAW(int cc __unused, int y __unused,
                                 int Rd __unused, int Rm __unused,
                                 int Rs __unused, int Rn __unused)
{
    // *mPC++ = (cc<<28) | 0x1200080 | (Rd<<16) | (Rn<<12) | (Rs<<8) | (y<<4) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

// used by ARMv6 version of GGLAssembler::filter32
void ArmToMips64Assembler::UXTB16(int cc __unused, int Rd, int Rm, int rotate)
{
    mArmPC[mInum++] = pc();

    // Rd[31:16] := ZeroExtend((Rm ROR (8 * sh))[23:16]),
    // Rd[15:0]  := ZeroExtend((Rm ROR (8 * sh))[7:0]). sh 0-3.

    mMips->ROTR(R_at2, Rm, rotate * 8);
    mMips->LUI(R_at, 0xFF);
    mMips->ORI(R_at, R_at, 0xFF);
    mMips->AND(Rd, R_at2, R_at);
}
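// The LUI/ORI pair above builds the mask 0x00FF00FF in R_at, so the AND keeps
// byte 0 and byte 2 of the rotated value -- the two zero-extended halfwords
// UXTB16 is defined to produce. E.g. (illustrative) with rotate = 0 and
// Rm = 0x11223344, Rd becomes 0x00220044.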
void ArmToMips64Assembler::UBFX(int cc __unused, int Rd __unused, int Rn __unused,
                                int lsb __unused, int width __unused)
{
    /* Placeholder for UBFX */
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}

// ----------------------------------------------------------------------------
// Address Processing...
// ----------------------------------------------------------------------------

void ArmToMips64Assembler::ADDR_ADD(int cc,
                                    int s, int Rd, int Rn, uint32_t Op2)
{
    // if (cc != AL) { NOT_IMPLEMENTED(); return; }  // Not required
    // if (s != 0)   { NOT_IMPLEMENTED(); return; }  // Not required
    dataProcessing(opADD64, cc, s, Rd, Rn, Op2);
}

void ArmToMips64Assembler::ADDR_SUB(int cc,
                                    int s, int Rd, int Rn, uint32_t Op2)
{
    // if (cc != AL) { NOT_IMPLEMENTED(); return; }  // Not required
    // if (s != 0)   { NOT_IMPLEMENTED(); return; }  // Not required
    dataProcessing(opSUB64, cc, s, Rd, Rn, Op2);
}

void ArmToMips64Assembler::ADDR_LDR(int cc __unused, int Rd,
                                    int Rn, uint32_t offset) {
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;  // convert LDR via Arm SP to LD via Mips SP
            }
            mMips->LD(Rd, Rn, amode.value);
            if (amode.writeback) {  // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;  // convert LDR via Arm SP to LD via Mips SP
            }
            mMips->LD(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->LD(Rd, R_at, 0);
            break;
    }
}

void ArmToMips64Assembler::ADDR_STR(int cc __unused, int Rd,
                                    int Rn, uint32_t offset) {
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;  // convert STR thru Arm SP to SD thru Mips SP
            }
            if (amode.writeback) {  // OPTIONAL writeback on pre-index mode
                // If we will writeback, then update the index reg, then store.
                // This correctly handles the stack-push case.
                mMips->DADDIU(Rn, Rn, amode.value);
                mMips->SD(Rd, Rn, 0);
            } else {
                // No writeback, so store offset by value
                mMips->SD(Rd, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            mMips->SD(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);  // post-index always writes back
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->SD(Rd, R_at, 0);
            break;
    }
}
#if 0
#pragma mark -
#pragma mark MIPS Assembler...
#endif

//**************************************************************************
//**************************************************************************
//**************************************************************************

/* MIPS64 assembler
** this is a subset of mips64r6, targeted specifically at ARM instruction
** replacement in the pixelflinger/codeflinger code.
**
** This class is extended from the MIPSAssembler class and overrides only
** the MIPS64r6-specific instructions.
*/

MIPS64Assembler::MIPS64Assembler(const sp<Assembly>& assembly, ArmToMips64Assembler *parent)
    : MIPSAssembler::MIPSAssembler(assembly, NULL), mParent(parent)
{
}

MIPS64Assembler::MIPS64Assembler(void* assembly, ArmToMips64Assembler *parent)
    : MIPSAssembler::MIPSAssembler(assembly), mParent(parent)
{
}

MIPS64Assembler::~MIPS64Assembler()
{
}
void MIPS64Assembler::reset()
{
    if (mAssembly != NULL) {
        mBase = mPC = (uint32_t *)mAssembly->base();
    } else {
        mPC = mBase = base();
    }
    mBranchTargets.clear();
    mLabels.clear();
    mLabelsInverseMapping.clear();
    mComments.clear();
}

void MIPS64Assembler::disassemble(const char* name __unused)
{
    char di_buf[140];

    bool arm_disasm_fmt = (mParent->mArmDisassemblyBuffer == NULL) ? false : true;

    typedef char dstr[40];
    dstr *lines = (dstr *)mParent->mArmDisassemblyBuffer;

    if (mParent->mArmDisassemblyBuffer != NULL) {
        for (int i = 0; i < mParent->mArmInstrCount; ++i) {
            string_detab(lines[i]);
        }
    }

    size_t count = pc() - base();
    uint32_t* mipsPC = base();

    while (count--) {
        ssize_t label = mLabelsInverseMapping.indexOfKey(mipsPC);
        if (label >= 0) {
            ALOGW("%s:\n", mLabelsInverseMapping.valueAt(label));
        }
        ssize_t comment = mComments.indexOfKey(mipsPC);
        if (comment >= 0) {
            ALOGW("; %s\n", mComments.valueAt(comment));
        }
        ::mips_disassem(mipsPC, di_buf, arm_disasm_fmt);
        string_detab(di_buf);
        string_pad(di_buf, 30);
        ALOGW("%08lx: %08x    %s", uintptr_t(mipsPC), uint32_t(*mipsPC), di_buf);
        mipsPC++;
    }
}
void MIPS64Assembler::fix_branches()
{
    // fixup all the branches
    size_t count = mBranchTargets.size();
    while (count--) {
        const branch_target_t& bt = mBranchTargets[count];
        uint32_t* target_pc = mLabels.valueFor(bt.label);
        LOG_ALWAYS_FATAL_IF(!target_pc,
                            "error resolving branch targets, target_pc is null");
        int32_t offset = int32_t(target_pc - (bt.pc + 1));
        *bt.pc |= offset & 0x00FFFF;
    }
}
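// Branch-offset math (a sketch): MIPS branch offsets are signed 16-bit word
// counts relative to the instruction *after* the branch, hence the (bt.pc + 1).
// E.g. a label two instructions past the branch encodes offset +1; a negative
// offset is handled by the two's-complement low 16 bits kept by '& 0x00FFFF'.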
void MIPS64Assembler::DADDU(int Rd, int Rs, int Rt)
{
    *mPC++ = (spec_op<<OP_SHF) | (daddu_fn<<FUNC_SHF)
             | (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF);
}

void MIPS64Assembler::DADDIU(int Rt, int Rs, int16_t imm)
{
    *mPC++ = (daddiu_op<<OP_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF) | (imm & MSK_16);
}

void MIPS64Assembler::DSUBU(int Rd, int Rs, int Rt)
{
    *mPC++ = (spec_op<<OP_SHF) | (dsubu_fn<<FUNC_SHF) |
             (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF);
}

void MIPS64Assembler::DSUBIU(int Rt, int Rs, int16_t imm)  // really daddiu(d, s, -j)
{
    *mPC++ = (daddiu_op<<OP_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF) | ((-imm) & MSK_16);
}

void MIPS64Assembler::MUL(int Rd, int Rs, int Rt)
{
    *mPC++ = (spec_op<<OP_SHF) | (mul_fn<<RE_SHF) | (sop30_fn<<FUNC_SHF) |
             (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF);
}

void MIPS64Assembler::MUH(int Rd, int Rs, int Rt)
{
    *mPC++ = (spec_op<<OP_SHF) | (muh_fn<<RE_SHF) | (sop30_fn<<FUNC_SHF) |
             (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF);
}

void MIPS64Assembler::CLO(int Rd, int Rs)
{
    *mPC++ = (spec_op<<OP_SHF) | (17<<FUNC_SHF) |
             (Rd<<RD_SHF) | (Rs<<RS_SHF) | (1<<RE_SHF);
}

void MIPS64Assembler::CLZ(int Rd, int Rs)
{
    *mPC++ = (spec_op<<OP_SHF) | (16<<FUNC_SHF) |
             (Rd<<RD_SHF) | (Rs<<RS_SHF) | (1<<RE_SHF);
}

void MIPS64Assembler::LD(int Rt, int Rbase, int16_t offset)
{
    *mPC++ = (ld_op<<OP_SHF) | (Rbase<<RS_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
}

void MIPS64Assembler::SD(int Rt, int Rbase, int16_t offset)
{
    *mPC++ = (sd_op<<OP_SHF) | (Rbase<<RS_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
}

void MIPS64Assembler::LUI(int Rt, int16_t offset)
{
    *mPC++ = (aui_op<<OP_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
}
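// Encoding note: these emitters pack the classic MIPS R-type/I-type fields
// (op | rs | rt | rd | shamt/re | funct) via the *_SHF shift constants. LUI
// is encoded through aui_op with the rs field left as zero -- in MIPS r6,
// AUI with rs = $zero is the LUI operation.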
void MIPS64Assembler::JR(int Rs)
{
    *mPC++ = (spec_op<<OP_SHF) | (Rs<<RS_SHF) | (jalr_fn<<FUNC_SHF);
    MIPS64Assembler::NOP();
}
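// JR is emitted through the JALR encoding with the rd field left as zero
// (r6 folds JR into JALR $zero, rs); the trailing NOP fills the branch
// delay slot.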
}; // namespace android