/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ENTRY(f) .text; .align 4; .globl f; .type f,#function; f:
#define PRIVATE(f) .text; .align 4; .type f,#function; f:
#define END(f) .size f, .-f;

//#define ARCH_ARM64_USE_BLUR_PRELOAD

/* Number of fractional bits to preserve in intermediate results. The
 * intermediate storage is 16-bit, and we started with 8-bit data (the integer
 * part), so this should be between 0 and 8.
 */
.set FRACTION_BITS, 7
.set MAX_R, 25
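
/* Not part of the original source: a rough scalar model of the fixed-point
 * scheme used below, assuming the coefficient table is normalised to sum to
 * 1.0 in Q16 (names are illustrative; saturation is omitted for brevity):
 *
 *   // one vertically-convolved column, cf. the umull/umlal/uqrshrn pattern
 *   uint16_t vert_column(const uint8_t *p, ptrdiff_t pitch,
 *                        const uint16_t *coeff, int r) {
 *       uint32_t sum = coeff[0] * p[0];                     // centre tap
 *       for (int i = 1; i <= r; i++)                        // paired taps
 *           sum += coeff[i] * (uint32_t)(p[-i * pitch] + p[i * pitch]);
 *       // round and shift, keeping FRACTION_BITS fractional bits (uqrshrn)
 *       return (sum + (1u << (15 - FRACTION_BITS))) >> (16 - FRACTION_BITS);
 *   }
 */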

/* A quick way of making a line of code conditional on some other condition.
 * Use `.set cc, 1` or `.set cc, 0` to enable or disable lines prefixed with
 * `ifcc`:
 */
.macro ifcc zzz:vararg
.if cc
        \zzz
.endif
.endm

/* It's not always clear that prefetching is beneficial and this needs further
 * testing on different cores, so it's made switchable here.
 */
#if defined(ARCH_ARM64_USE_BLUR_PRELOAD)
#define VERTPLD(...) prfm PLDL1KEEP, [__VA_ARGS__]
#else
#define VERTPLD(...) nop
#endif

/* Fetch 16 columns of bytes (regardless of image format), convolve these
 * vertically, and leave them in the register file. If working near the top or
 * bottom of an image then clamp the addressing while loading the data in.
 *
 * The convolution is fully unrolled for windows up to max_r, with the
 * outermost edges calculated first. This way it's possible to branch directly
 * into the relevant part of the code for an arbitrary convolution radius. Two
 * variants of the loop are produced; one eliminates the clamping code for a
 * slight speed advantage.
 *
 * Where the macro is called with reg=x, the specified register is taken to
 * contain a pre-calculated pointer into one of the two loops.
 *
 * Input:
 *      x1 -- src
 *      x2 -- pitch
 *      x5 -- r
 *      x6 -- rup (r, unless clipped to top of source image)
 *      x7 -- rdn (r, unless clipped to bottom of source image)
 *      x12 -- switch index
 *      v0-v3 -- coefficient table
 *      x13 = -pitch
 *      x15 = top-row in
 *      x19 = bottom-row in
 * Output:
 *      x1 += 16
 *      v10,v11 -- 16 convolved columns
 * Modifies:
 *      x10 = upper row pointer
 *      x11 = lower row pointer
 *      v12-v15 = temporary sums
 */
.macro fetch, max_r=MAX_R, labelc=1, labelnc=2, reg=x12 /*{{{*/
.ifc \reg,x12 ; .set cc, 1 ; .else ; .set cc, 0 ; .endif
        ld1 {v15.16b}, [x1], #16
        mov x10, x15
        uxtl v14.8h, v15.8b
        VERTPLD(x1, #16)
        uxtl2 v15.8h, v15.16b
.if \max_r < 16 // approximate
        ifcc adr \reg, 1f
.else
        ifcc adrp \reg, 1f
        ifcc add \reg, \reg, #:lo12:1f
.endif
        umull v12.4s, v14.4h, v0.h[0]
        ifcc sub \reg, \reg, x5, LSL #6
        umull2 v13.4s, v14.8h, v0.h[0]
        mov x11, x19
        umull v14.4s, v15.4h, v0.h[0]
        ifcc add \reg, \reg, x5, LSL #3
        umull2 v15.4s, v15.8h, v0.h[0]
        br \reg

/* This version of the vertical fetch loop body is used away from the edges
 * of the source image. The pointers start at the top and bottom source rows
 * and work their way towards the centre on each iteration. This way the
 * number of taps used can be controlled by jumping directly into the middle
 * of the loop and running to completion.
 * If the loop body changes size then the code which calculates the address of
 * the initial iteration must be updated accordingly.
 */
.macro vertfetch_noclamp i, dreg
.if 0 < \i && \i <= \max_r
        ld1 {v10.16b}, [x10], x2
        ld1 {v11.16b}, [x11], x13
        uaddl v16.8h, v10.8b, v11.8b
        uaddl2 v11.8h, v10.16b, v11.16b
        umlal v12.4s, v16.4h, \dreg
        umlal2 v13.4s, v16.8h, \dreg
        VERTPLD(x10, #32)
        umlal v14.4s, v11.4h, \dreg
        VERTPLD(x11, #32)
        umlal2 v15.4s, v11.8h, \dreg
.endif
.endm

/* This version of the vertical fetch loop body is used near the edges of the
 * source image, where one or both of the accesses may start with a clamped
 * value, and the row addresses only begin to change after some number of
 * iterations before the end.
 * If the loop body changes size then the code which calculates the address of
 * the initial iteration must be updated accordingly.
 */
.macro vertfetch_clamped i, dreg
.if 0 < \i && \i <= \max_r
        ld1 {v10.16b}, [x10], x2
        cmp x6, #\i
        ld1 {v11.16b}, [x11], x13
        csel x10, x15, x10, lo
        uaddl v16.8h, v10.8b, v11.8b
        cmp x7, #\i
        uaddl2 v11.8h, v10.16b, v11.16b
        csel x11, x19, x11, lo
        umlal v12.4s, v16.4h, \dreg
        umlal2 v13.4s, v16.8h, \dreg
        VERTPLD(x10, #32)
        umlal v14.4s, v11.4h, \dreg
        VERTPLD(x11, #32)
        umlal2 v15.4s, v11.8h, \dreg
.endif
.endm

/* Entry into this unrolled loop is computed as a negative index from
 * \labelc at the end of the block.
 */
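/* Not in the original: the arithmetic behind that negative index, assuming
 * each clamped iteration assembles to exactly 14 instructions (56 bytes),
 * which the `sub \reg, \reg, x5, LSL #6` / `add \reg, \reg, x5, LSL #3` pair
 * in the fetch macro encodes as -(r * 64) + (r * 8):
 *
 *   // entry = 1f - r * 56, i.e. r iterations back from the end of the block
 *   uint32_t *entry = label_1f - 14 * r;   // 14 insns of 4 bytes per tap
 */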
.align 4
        vertfetch_clamped 27, v3.h[3]
        vertfetch_clamped 26, v3.h[2]
        vertfetch_clamped 25, v3.h[1]
        vertfetch_clamped 24, v3.h[0]
        vertfetch_clamped 23, v2.h[7]
        vertfetch_clamped 22, v2.h[6]
        vertfetch_clamped 21, v2.h[5]
        vertfetch_clamped 20, v2.h[4]
        vertfetch_clamped 19, v2.h[3]
        vertfetch_clamped 18, v2.h[2]
        vertfetch_clamped 17, v2.h[1]
        vertfetch_clamped 16, v2.h[0]
        vertfetch_clamped 15, v1.h[7]
        vertfetch_clamped 14, v1.h[6]
        vertfetch_clamped 13, v1.h[5]
        vertfetch_clamped 12, v1.h[4]
        vertfetch_clamped 11, v1.h[3]
        vertfetch_clamped 10, v1.h[2]
        vertfetch_clamped 9, v1.h[1]
        vertfetch_clamped 8, v1.h[0]
        vertfetch_clamped 7, v0.h[7]
        vertfetch_clamped 6, v0.h[6]
        vertfetch_clamped 5, v0.h[5]
        vertfetch_clamped 4, v0.h[4]
        vertfetch_clamped 3, v0.h[3]
        vertfetch_clamped 2, v0.h[2]
        vertfetch_clamped 1, v0.h[1]
        vertfetch_clamped 0, v0.h[0]
1:
\labelc : b 2f /* done with clamped loop, skip over non-clamped loop */

/* Entry into this unrolled loop is computed as a negative index from
 * \labelnc at the end of the block.
 */
.align 4
        vertfetch_noclamp 27, v3.h[3]
        vertfetch_noclamp 26, v3.h[2]
        vertfetch_noclamp 25, v3.h[1]
        vertfetch_noclamp 24, v3.h[0]
        vertfetch_noclamp 23, v2.h[7]
        vertfetch_noclamp 22, v2.h[6]
        vertfetch_noclamp 21, v2.h[5]
        vertfetch_noclamp 20, v2.h[4]
        vertfetch_noclamp 19, v2.h[3]
        vertfetch_noclamp 18, v2.h[2]
        vertfetch_noclamp 17, v2.h[1]
        vertfetch_noclamp 16, v2.h[0]
        vertfetch_noclamp 15, v1.h[7]
        vertfetch_noclamp 14, v1.h[6]
        vertfetch_noclamp 13, v1.h[5]
        vertfetch_noclamp 12, v1.h[4]
        vertfetch_noclamp 11, v1.h[3]
        vertfetch_noclamp 10, v1.h[2]
        vertfetch_noclamp 9, v1.h[1]
        vertfetch_noclamp 8, v1.h[0]
        vertfetch_noclamp 7, v0.h[7]
        vertfetch_noclamp 6, v0.h[6]
        vertfetch_noclamp 5, v0.h[5]
        vertfetch_noclamp 4, v0.h[4]
        vertfetch_noclamp 3, v0.h[3]
        vertfetch_noclamp 2, v0.h[2]
        vertfetch_noclamp 1, v0.h[1]
        vertfetch_noclamp 0, v0.h[0]
\labelnc :
.purgem vertfetch_clamped
.purgem vertfetch_noclamp

2:      uqrshrn v10.4h, v12.4s, #16 - FRACTION_BITS
        add x15, x15, #16
        uqrshrn2 v10.8h, v13.4s, #16 - FRACTION_BITS
        add x19, x19, #16
        uqrshrn v11.4h, v14.4s, #16 - FRACTION_BITS
        uqrshrn2 v11.8h, v15.4s, #16 - FRACTION_BITS
.endm /*}}}*/

/* Some portion of the convolution window (as much as will fit, and all of it
 * for the uchar1 cases) is kept in the register file to avoid unnecessary
 * memory accesses. This forces the horizontal loops to be unrolled because
 * there's no indexed addressing into the register file.
 *
 * As in the fetch macro, the operations are ordered from outside to inside, so
 * that jumping into the middle of the block bypasses the unwanted window taps.
 *
 * There are several variants of the macro because of the fixed offsets of the
 * taps -- the wider the maximum radius the further the centre tap is from the
 * most recently fetched data. This means that pre-filling the window requires
 * more data that won't be used and it means that rotating the window involves
 * more mov operations.
 *
 * When the window gets too big to fit in the register file, the buffer at
 * [x9] is used.
 *
 * Input:
 *      v16-v31,v4-v11 -- convolution window
 *      x9 -- pointer to additional convolution window data
 * Output:
 *      x9 -- updated buffer pointer (if used)
 *      d31 -- result to be stored
 * Modifies:
 *      x12 -- temp buffer pointer
 *      v12-v13 -- temporaries for load and vext operations.
 *      v14-v15 -- intermediate sums
 */
#define TUNED_LIST1 8, 16
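
/* Not in the original: a hedged C sketch of the radius dispatch used by every
 * hconv macro below. The `200:` .hword table holds signed offsets from local
 * label 100f to the per-radius entry points; `ldrsh` indexes it by the radius
 * in x5 and `br` jumps into the unrolled taps, which then fall through from
 * the outermost tap to the innermost (centre_tap/tap/narrow are hypothetical
 * helpers):
 *
 *   uint32_t hconv_dispatch(int r) {
 *       uint32_t sum = centre_tap();       // umull before the br
 *       switch (r) {                       // ldrsh x12, [x16, x5, LSL #1]
 *       case 8: sum += tap(8);             // 108: ... (falls through)
 *       case 7: sum += tap(7);             // 107: ...
 *       case 6: sum += tap(6);
 *       case 5: sum += tap(5);
 *       case 4: sum += tap(4);
 *       case 3: sum += tap(3);
 *       case 2: sum += tap(2);
 *       case 1: sum += tap(1);             // 101: innermost pair of taps
 *       }
 *       return narrow(sum);                // uqrshrn sequence at the end
 *   }
 */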
.macro hconv1_8/*{{{*/
.rodata
200:    .hword -4
        .hword 101f-100f
        .hword 102f-100f
        .hword 103f-100f
        .hword 104f-100f
        .hword 105f-100f
        .hword 106f-100f
        .hword 107f-100f
        .hword 108f-100f
.align 4
.text
        umull v14.4s, v9.4h, v0.h[0]
        umull2 v15.4s, v9.8h, v0.h[0]
        adrp x16, 200b
        add x16, x16, :lo12:200b
        ldrsh x12, [x16, x5, LSL #1]
        adr x16, 100f
        add x12, x12, x16
100:    br x12
108:    umlal v14.4s, v8.4h, v1.h[0]
        umlal2 v15.4s, v8.8h, v1.h[0]
        umlal v14.4s, v10.4h, v1.h[0]
        umlal2 v15.4s, v10.8h, v1.h[0]
107:    ext v12.16b, v8.16b, v9.16b, #1*2
        ext v13.16b, v9.16b, v10.16b, #7*2
        umlal v14.4s, v12.4h, v0.h[7]
        umlal2 v15.4s, v12.8h, v0.h[7]
        umlal v14.4s, v13.4h, v0.h[7]
        umlal2 v15.4s, v13.8h, v0.h[7]
106:    ext v12.16b, v8.16b, v9.16b, #2*2
        ext v13.16b, v9.16b, v10.16b, #6*2
        umlal v14.4s, v12.4h, v0.h[6]
        umlal2 v15.4s, v12.8h, v0.h[6]
        umlal v14.4s, v13.4h, v0.h[6]
        umlal2 v15.4s, v13.8h, v0.h[6]
105:    ext v12.16b, v8.16b, v9.16b, #3*2
        ext v13.16b, v9.16b, v10.16b, #5*2
        umlal v14.4s, v12.4h, v0.h[5]
        umlal2 v15.4s, v12.8h, v0.h[5]
        umlal v14.4s, v13.4h, v0.h[5]
        umlal2 v15.4s, v13.8h, v0.h[5]
104:    //ext v12.16b, v8.16b, v9.16b, #4*2
        //ext v13.16b, v9.16b, v10.16b, #4*2
        umlal2 v14.4s, v8.8h, v0.h[4]
        umlal v15.4s, v9.4h, v0.h[4]
        umlal2 v14.4s, v9.8h, v0.h[4]
        umlal v15.4s, v10.4h, v0.h[4]
103:    ext v12.16b, v8.16b, v9.16b, #5*2
        ext v13.16b, v9.16b, v10.16b, #3*2
        umlal v14.4s, v12.4h, v0.h[3]
        umlal2 v15.4s, v12.8h, v0.h[3]
        umlal v14.4s, v13.4h, v0.h[3]
        umlal2 v15.4s, v13.8h, v0.h[3]
102:    ext v12.16b, v8.16b, v9.16b, #6*2
        ext v13.16b, v9.16b, v10.16b, #2*2
        umlal v14.4s, v12.4h, v0.h[2]
        umlal2 v15.4s, v12.8h, v0.h[2]
        umlal v14.4s, v13.4h, v0.h[2]
        umlal2 v15.4s, v13.8h, v0.h[2]
101:    ext v12.16b, v8.16b, v9.16b, #7*2
        ext v13.16b, v9.16b, v10.16b, #1*2
        umlal v14.4s, v12.4h, v0.h[1]
        umlal2 v15.4s, v12.8h, v0.h[1]
        umlal v14.4s, v13.4h, v0.h[1]
        umlal2 v15.4s, v13.8h, v0.h[1]
        uqrshrn v14.4h, v14.4s, #16
        uqrshrn2 v14.8h, v15.4s, #16
        uqrshrn v15.8b, v14.8h, #FRACTION_BITS
        mov v8.16b, v9.16b
        mov v9.16b, v10.16b
        mov v10.16b, v11.16b
.endm/*}}}*/

.macro hconv1_16/*{{{*/
.rodata
200:    .hword -4
        .hword 101f-100f
        .hword 102f-100f
        .hword 103f-100f
        .hword 104f-100f
        .hword 105f-100f
        .hword 106f-100f
        .hword 107f-100f
        .hword 108f-100f
        .hword 109f-100f
        .hword 110f-100f
        .hword 111f-100f
        .hword 112f-100f
        .hword 113f-100f
        .hword 114f-100f
        .hword 115f-100f
        .hword 116f-100f
.align 4
.text
        umull v14.4s, v8.4h, v0.h[0]
        umull2 v15.4s, v8.8h, v0.h[0]
        adrp x16, 200b
        add x16, x16, :lo12:200b
        ldrsh x12, [x16, x5, LSL #1]
        adr x16, 100f
        add x12, x12, x16
100:    br x12
116:    //ext v12.16b, v6.16b, v7.16b, #0*2
        //ext v13.16b, v10.16b, v11.16b, #0*2
        umlal v14.4s, v6.4h, v2.h[0]
        umlal2 v15.4s, v6.8h, v2.h[0]
        umlal v14.4s, v10.4h, v2.h[0]
        umlal2 v15.4s, v10.8h, v2.h[0]
115:    ext v12.16b, v6.16b, v7.16b, #1*2
        ext v13.16b, v9.16b, v10.16b, #7*2
        umlal v14.4s, v12.4h, v1.h[7]
        umlal2 v15.4s, v12.8h, v1.h[7]
        umlal v14.4s, v13.4h, v1.h[7]
        umlal2 v15.4s, v13.8h, v1.h[7]
114:    ext v12.16b, v6.16b, v7.16b, #2*2
        ext v13.16b, v9.16b, v10.16b, #6*2
        umlal v14.4s, v12.4h, v1.h[6]
        umlal2 v15.4s, v12.8h, v1.h[6]
        umlal v14.4s, v13.4h, v1.h[6]
        umlal2 v15.4s, v13.8h, v1.h[6]
113:    ext v12.16b, v6.16b, v7.16b, #3*2
        ext v13.16b, v9.16b, v10.16b, #5*2
        umlal v14.4s, v12.4h, v1.h[5]
        umlal2 v15.4s, v12.8h, v1.h[5]
        umlal v14.4s, v13.4h, v1.h[5]
        umlal2 v15.4s, v13.8h, v1.h[5]
112:    //ext v12.16b, v6.16b, v7.16b, #4*2
        //ext v13.16b, v9.16b, v10.16b, #4*2
        umlal2 v14.4s, v6.8h, v1.h[4]
        umlal v15.4s, v7.4h, v1.h[4]
        umlal2 v14.4s, v9.8h, v1.h[4]
        umlal v15.4s, v10.4h, v1.h[4]
111:    ext v12.16b, v6.16b, v7.16b, #5*2
        ext v13.16b, v9.16b, v10.16b, #3*2
        umlal v14.4s, v12.4h, v1.h[3]
        umlal2 v15.4s, v12.8h, v1.h[3]
        umlal v14.4s, v13.4h, v1.h[3]
        umlal2 v15.4s, v13.8h, v1.h[3]
110:    ext v12.16b, v6.16b, v7.16b, #6*2
        ext v13.16b, v9.16b, v10.16b, #2*2
        umlal v14.4s, v12.4h, v1.h[2]
        umlal2 v15.4s, v12.8h, v1.h[2]
        umlal v14.4s, v13.4h, v1.h[2]
        umlal2 v15.4s, v13.8h, v1.h[2]
109:    ext v12.16b, v6.16b, v7.16b, #7*2
        ext v13.16b, v9.16b, v10.16b, #1*2
        umlal v14.4s, v12.4h, v1.h[1]
        umlal2 v15.4s, v12.8h, v1.h[1]
        umlal v14.4s, v13.4h, v1.h[1]
        umlal2 v15.4s, v13.8h, v1.h[1]
108:    //ext v12.16b, v7.16b, v8.16b, #0*2
        //ext v13.16b, v9.16b, v10.16b, #0*2
        umlal v14.4s, v7.4h, v1.h[0]
        umlal2 v15.4s, v7.8h, v1.h[0]
        umlal v14.4s, v9.4h, v1.h[0]
        umlal2 v15.4s, v9.8h, v1.h[0]
107:    ext v12.16b, v7.16b, v8.16b, #1*2
        ext v13.16b, v8.16b, v9.16b, #7*2
        umlal v14.4s, v12.4h, v0.h[7]
        umlal2 v15.4s, v12.8h, v0.h[7]
        umlal v14.4s, v13.4h, v0.h[7]
        umlal2 v15.4s, v13.8h, v0.h[7]
106:    ext v12.16b, v7.16b, v8.16b, #2*2
        ext v13.16b, v8.16b, v9.16b, #6*2
        umlal v14.4s, v12.4h, v0.h[6]
        umlal2 v15.4s, v12.8h, v0.h[6]
        umlal v14.4s, v13.4h, v0.h[6]
        umlal2 v15.4s, v13.8h, v0.h[6]
105:    ext v12.16b, v7.16b, v8.16b, #3*2
        ext v13.16b, v8.16b, v9.16b, #5*2
        umlal v14.4s, v12.4h, v0.h[5]
        umlal2 v15.4s, v12.8h, v0.h[5]
        umlal v14.4s, v13.4h, v0.h[5]
        umlal2 v15.4s, v13.8h, v0.h[5]
104:    //ext v12.16b, v7.16b, v8.16b, #4*2
        //ext v13.16b, v8.16b, v9.16b, #4*2
        umlal2 v14.4s, v7.8h, v0.h[4]
        umlal v15.4s, v8.4h, v0.h[4]
        umlal2 v14.4s, v8.8h, v0.h[4]
        umlal v15.4s, v9.4h, v0.h[4]
103:    ext v12.16b, v7.16b, v8.16b, #5*2
        ext v13.16b, v8.16b, v9.16b, #3*2
        umlal v14.4s, v12.4h, v0.h[3]
        umlal2 v15.4s, v12.8h, v0.h[3]
        umlal v14.4s, v13.4h, v0.h[3]
        umlal2 v15.4s, v13.8h, v0.h[3]
102:    ext v12.16b, v7.16b, v8.16b, #6*2
        ext v13.16b, v8.16b, v9.16b, #2*2
        umlal v14.4s, v12.4h, v0.h[2]
        umlal2 v15.4s, v12.8h, v0.h[2]
        umlal v14.4s, v13.4h, v0.h[2]
        umlal2 v15.4s, v13.8h, v0.h[2]
101:    ext v12.16b, v7.16b, v8.16b, #7*2
        ext v13.16b, v8.16b, v9.16b, #1*2
        umlal v14.4s, v12.4h, v0.h[1]
        umlal2 v15.4s, v12.8h, v0.h[1]
        umlal v14.4s, v13.4h, v0.h[1]
        umlal2 v15.4s, v13.8h, v0.h[1]
        uqrshrn v14.4h, v14.4s, #16
        uqrshrn2 v14.8h, v15.4s, #16
        uqrshrn v15.8b, v14.8h, #FRACTION_BITS
        mov v6.16b, v7.16b
        mov v7.16b, v8.16b
        mov v8.16b, v9.16b
        mov v9.16b, v10.16b
        mov v10.16b, v11.16b
.endm/*}}}*/

.macro hconv1_25/*{{{*/
.rodata
200:    .hword -4
        .hword 101f-100f
        .hword 102f-100f
        .hword 103f-100f
        .hword 104f-100f
        .hword 105f-100f
        .hword 106f-100f
        .hword 107f-100f
        .hword 108f-100f
        .hword 109f-100f
        .hword 110f-100f
        .hword 111f-100f
        .hword 112f-100f
        .hword 113f-100f
        .hword 114f-100f
        .hword 115f-100f
        .hword 116f-100f
        .hword 117f-100f
        .hword 118f-100f
        .hword 119f-100f
        .hword 120f-100f
        .hword 121f-100f
        .hword 122f-100f
        .hword 123f-100f
        .hword 124f-100f
        .hword 125f-100f
.align 4
.text
        ext v12.16b, v6.16b, v7.16b, #7*2
        umull v14.4s, v12.4h, v0.h[0]
        umull2 v15.4s, v12.8h, v0.h[0]
        adrp x16, 200b
        add x16, x16, :lo12:200b
        ldrsh x12, [x16, x5, LSL #1]
        adr x16, 100f
        add x12, x12, x16
100:    br x12
125:    ext v12.16b, v31.16b, v4.16b, #6*2
        ext v13.16b, v10.16b, v11.16b, #0*2
        umlal v14.4s, v12.4h, v3.h[1]
        umlal2 v15.4s, v12.8h, v3.h[1]
        umlal v14.4s, v13.4h, v3.h[1]
        umlal2 v15.4s, v13.8h, v3.h[1]
124:    ext v12.16b, v31.16b, v4.16b, #7*2
        ext v13.16b, v9.16b, v10.16b, #7*2
        umlal v14.4s, v12.4h, v3.h[0]
        umlal2 v15.4s, v12.8h, v3.h[0]
        umlal v14.4s, v13.4h, v3.h[0]
        umlal2 v15.4s, v13.8h, v3.h[0]
123:    ext v12.16b, v4.16b, v5.16b, #0*2
        ext v13.16b, v9.16b, v10.16b, #6*2
        umlal v14.4s, v12.4h, v2.h[7]
        umlal2 v15.4s, v12.8h, v2.h[7]
        umlal v14.4s, v13.4h, v2.h[7]
        umlal2 v15.4s, v13.8h, v2.h[7]
122:    ext v12.16b, v4.16b, v5.16b, #1*2
        ext v13.16b, v9.16b, v10.16b, #5*2
        umlal v14.4s, v12.4h, v2.h[6]
        umlal2 v15.4s, v12.8h, v2.h[6]
        umlal v14.4s, v13.4h, v2.h[6]
        umlal2 v15.4s, v13.8h, v2.h[6]
121:    ext v12.16b, v4.16b, v5.16b, #2*2
        ext v13.16b, v9.16b, v10.16b, #4*2
        umlal v14.4s, v12.4h, v2.h[5]
        umlal2 v15.4s, v12.8h, v2.h[5]
        umlal v14.4s, v13.4h, v2.h[5]
        umlal2 v15.4s, v13.8h, v2.h[5]
120:    ext v12.16b, v4.16b, v5.16b, #3*2
        ext v13.16b, v9.16b, v10.16b, #3*2
        umlal v14.4s, v12.4h, v2.h[4]
        umlal2 v15.4s, v12.8h, v2.h[4]
        umlal v14.4s, v13.4h, v2.h[4]
        umlal2 v15.4s, v13.8h, v2.h[4]
119:    ext v12.16b, v4.16b, v5.16b, #4*2
        ext v13.16b, v9.16b, v10.16b, #2*2
        umlal v14.4s, v12.4h, v2.h[3]
        umlal2 v15.4s, v12.8h, v2.h[3]
        umlal v14.4s, v13.4h, v2.h[3]
        umlal2 v15.4s, v13.8h, v2.h[3]
118:    ext v12.16b, v4.16b, v5.16b, #5*2
        ext v13.16b, v9.16b, v10.16b, #1*2
        umlal v14.4s, v12.4h, v2.h[2]
        umlal2 v15.4s, v12.8h, v2.h[2]
        umlal v14.4s, v13.4h, v2.h[2]
        umlal2 v15.4s, v13.8h, v2.h[2]
117:    ext v12.16b, v4.16b, v5.16b, #6*2
        ext v13.16b, v9.16b, v10.16b, #0*2
        umlal v14.4s, v12.4h, v2.h[1]
        umlal2 v15.4s, v12.8h, v2.h[1]
        umlal v14.4s, v13.4h, v2.h[1]
        umlal2 v15.4s, v13.8h, v2.h[1]
116:    ext v12.16b, v4.16b, v5.16b, #7*2
        ext v13.16b, v8.16b, v9.16b, #7*2
        umlal v14.4s, v12.4h, v2.h[0]
        umlal2 v15.4s, v12.8h, v2.h[0]
        umlal v14.4s, v13.4h, v2.h[0]
        umlal2 v15.4s, v13.8h, v2.h[0]
115:    ext v12.16b, v5.16b, v6.16b, #0*2
        ext v13.16b, v8.16b, v9.16b, #6*2
        umlal v14.4s, v12.4h, v1.h[7]
        umlal2 v15.4s, v12.8h, v1.h[7]
        umlal v14.4s, v13.4h, v1.h[7]
        umlal2 v15.4s, v13.8h, v1.h[7]
114:    ext v12.16b, v5.16b, v6.16b, #1*2
        ext v13.16b, v8.16b, v9.16b, #5*2
        umlal v14.4s, v12.4h, v1.h[6]
        umlal2 v15.4s, v12.8h, v1.h[6]
        umlal v14.4s, v13.4h, v1.h[6]
        umlal2 v15.4s, v13.8h, v1.h[6]
113:    ext v12.16b, v5.16b, v6.16b, #2*2
        ext v13.16b, v8.16b, v9.16b, #4*2
        umlal v14.4s, v12.4h, v1.h[5]
        umlal2 v15.4s, v12.8h, v1.h[5]
        umlal v14.4s, v13.4h, v1.h[5]
        umlal2 v15.4s, v13.8h, v1.h[5]
112:    ext v12.16b, v5.16b, v6.16b, #3*2
        ext v13.16b, v8.16b, v9.16b, #3*2
        umlal v14.4s, v12.4h, v1.h[4]
        umlal2 v15.4s, v12.8h, v1.h[4]
        umlal v14.4s, v13.4h, v1.h[4]
        umlal2 v15.4s, v13.8h, v1.h[4]
111:    ext v12.16b, v5.16b, v6.16b, #4*2
        ext v13.16b, v8.16b, v9.16b, #2*2
        umlal v14.4s, v12.4h, v1.h[3]
        umlal2 v15.4s, v12.8h, v1.h[3]
        umlal v14.4s, v13.4h, v1.h[3]
        umlal2 v15.4s, v13.8h, v1.h[3]
110:    ext v12.16b, v5.16b, v6.16b, #5*2
        ext v13.16b, v8.16b, v9.16b, #1*2
        umlal v14.4s, v12.4h, v1.h[2]
        umlal2 v15.4s, v12.8h, v1.h[2]
        umlal v14.4s, v13.4h, v1.h[2]
        umlal2 v15.4s, v13.8h, v1.h[2]
109:    ext v12.16b, v5.16b, v6.16b, #6*2
        ext v13.16b, v8.16b, v9.16b, #0*2
        umlal v14.4s, v12.4h, v1.h[1]
        umlal2 v15.4s, v12.8h, v1.h[1]
        umlal v14.4s, v13.4h, v1.h[1]
        umlal2 v15.4s, v13.8h, v1.h[1]
108:    ext v12.16b, v5.16b, v6.16b, #7*2
        ext v13.16b, v7.16b, v8.16b, #7*2
        umlal v14.4s, v12.4h, v1.h[0]
        umlal2 v15.4s, v12.8h, v1.h[0]
        umlal v14.4s, v13.4h, v1.h[0]
        umlal2 v15.4s, v13.8h, v1.h[0]
107:    ext v12.16b, v6.16b, v7.16b, #0*2
        ext v13.16b, v7.16b, v8.16b, #6*2
        umlal v14.4s, v12.4h, v0.h[7]
        umlal2 v15.4s, v12.8h, v0.h[7]
        umlal v14.4s, v13.4h, v0.h[7]
        umlal2 v15.4s, v13.8h, v0.h[7]
106:    ext v12.16b, v6.16b, v7.16b, #1*2
        ext v13.16b, v7.16b, v8.16b, #5*2
        umlal v14.4s, v12.4h, v0.h[6]
        umlal2 v15.4s, v12.8h, v0.h[6]
        umlal v14.4s, v13.4h, v0.h[6]
        umlal2 v15.4s, v13.8h, v0.h[6]
105:    ext v12.16b, v6.16b, v7.16b, #2*2
        ext v13.16b, v7.16b, v8.16b, #4*2
        umlal v14.4s, v12.4h, v0.h[5]
        umlal2 v15.4s, v12.8h, v0.h[5]
        umlal v14.4s, v13.4h, v0.h[5]
        umlal2 v15.4s, v13.8h, v0.h[5]
104:    ext v12.16b, v6.16b, v7.16b, #3*2
        ext v13.16b, v7.16b, v8.16b, #3*2
        umlal v14.4s, v12.4h, v0.h[4]
        umlal2 v15.4s, v12.8h, v0.h[4]
        umlal v14.4s, v13.4h, v0.h[4]
        umlal2 v15.4s, v13.8h, v0.h[4]
103:    ext v12.16b, v6.16b, v7.16b, #4*2
        ext v13.16b, v7.16b, v8.16b, #2*2
        umlal v14.4s, v12.4h, v0.h[3]
        umlal2 v15.4s, v12.8h, v0.h[3]
        umlal v14.4s, v13.4h, v0.h[3]
        umlal2 v15.4s, v13.8h, v0.h[3]
102:    ext v12.16b, v6.16b, v7.16b, #5*2
        ext v13.16b, v7.16b, v8.16b, #1*2
        umlal v14.4s, v12.4h, v0.h[2]
        umlal2 v15.4s, v12.8h, v0.h[2]
        umlal v14.4s, v13.4h, v0.h[2]
        umlal2 v15.4s, v13.8h, v0.h[2]
101:    ext v12.16b, v6.16b, v7.16b, #6*2
        ext v13.16b, v7.16b, v8.16b, #0*2
        umlal v14.4s, v12.4h, v0.h[1]
        umlal2 v15.4s, v12.8h, v0.h[1]
        umlal v14.4s, v13.4h, v0.h[1]
        umlal2 v15.4s, v13.8h, v0.h[1]
        uqrshrn v14.4h, v14.4s, #16
        uqrshrn2 v14.8h, v15.4s, #16
        uqrshrn v15.8b, v14.8h, #FRACTION_BITS
        mov v31.16b, v4.16b
        mov v4.16b, v5.16b
        mov v5.16b, v6.16b
        mov v6.16b, v7.16b
        mov v7.16b, v8.16b
        mov v8.16b, v9.16b
        mov v9.16b, v10.16b
        mov v10.16b, v11.16b
.endm/*}}}*/

#define TUNED_LIST4 6, 12, 20
.macro hconv4_6/*{{{*/
.rodata
200:    .hword -4
        .hword 101f-100f
        .hword 102f-100f
        .hword 103f-100f
        .hword 104f-100f
        .hword 105f-100f
        .hword 106f-100f
.align 4
.text
        umull v14.4s, v7.4h, v0.h[0]
        umull2 v15.4s, v7.8h, v0.h[0]
        adrp x16, 200b
        add x16, x16, :lo12:200b
        ldrsh x12, [x16, x5, LSL #1]
        adr x16, 100f
        add x12, x12, x16
100:    br x12
106:    umlal v14.4s, v4.4h, v0.h[6]
        umlal2 v15.4s, v4.8h, v0.h[6]
        umlal v14.4s, v10.4h, v0.h[6]
        umlal2 v15.4s, v10.8h, v0.h[6]
105:    umlal2 v14.4s, v4.8h, v0.h[5]
        umlal v15.4s, v5.4h, v0.h[5]
        umlal2 v14.4s, v9.8h, v0.h[5]
        umlal v15.4s, v10.4h, v0.h[5]
104:    umlal v14.4s, v5.4h, v0.h[4]
        umlal2 v15.4s, v5.8h, v0.h[4]
        umlal v14.4s, v9.4h, v0.h[4]
        umlal2 v15.4s, v9.8h, v0.h[4]
103:    umlal2 v14.4s, v5.8h, v0.h[3]
        umlal v15.4s, v6.4h, v0.h[3]
        umlal2 v14.4s, v8.8h, v0.h[3]
        umlal v15.4s, v9.4h, v0.h[3]
102:    umlal v14.4s, v6.4h, v0.h[2]
        umlal2 v15.4s, v6.8h, v0.h[2]
        umlal v14.4s, v8.4h, v0.h[2]
        umlal2 v15.4s, v8.8h, v0.h[2]
101:    umlal2 v14.4s, v6.8h, v0.h[1]
        umlal v15.4s, v7.4h, v0.h[1]
        umlal2 v14.4s, v7.8h, v0.h[1]
        umlal v15.4s, v8.4h, v0.h[1]
        uqrshrn v14.4h, v14.4s, #16
        uqrshrn2 v14.8h, v15.4s, #16
        uqrshrn v15.8b, v14.8h, #FRACTION_BITS
        mov v4.16b, v5.16b
        mov v5.16b, v6.16b
        mov v6.16b, v7.16b
        mov v7.16b, v8.16b
        mov v8.16b, v9.16b
        mov v9.16b, v10.16b
        mov v10.16b, v11.16b
.endm/*}}}*/

.macro hconv4_12/*{{{*/
.rodata
200:    .hword -4 //Might need to remove these...
        .hword 101f-100f
        .hword 102f-100f
        .hword 103f-100f
        .hword 104f-100f
        .hword 105f-100f
        .hword 106f-100f
        .hword 107f-100f
        .hword 108f-100f
        .hword 109f-100f
        .hword 110f-100f
        .hword 111f-100f
        .hword 112f-100f
.align 4
.text
        umull v14.4s, v4.4h, v0.h[0]
        umull2 v15.4s, v4.8h, v0.h[0]
        adrp x16, 200b
        add x16, x16, :lo12:200b
        ldrsh x12, [x16, x5, LSL #1]
        adr x16, 100f
        add x12, x12, x16
100:    br x12
112:    umlal v14.4s, v26.4h, v1.h[4]
        umlal2 v15.4s, v26.8h, v1.h[4]
        umlal v14.4s, v10.4h, v1.h[4]
        umlal2 v15.4s, v10.8h, v1.h[4]
111:    umlal2 v14.4s, v26.8h, v1.h[3]
        umlal v15.4s, v27.4h, v1.h[3]
        umlal2 v14.4s, v9.8h, v1.h[3]
        umlal v15.4s, v10.4h, v1.h[3]
110:    umlal v14.4s, v27.4h, v1.h[2]
        umlal2 v15.4s, v27.8h, v1.h[2]
        umlal v14.4s, v9.4h, v1.h[2]
        umlal2 v15.4s, v9.8h, v1.h[2]
109:    umlal2 v14.4s, v27.8h, v1.h[1]
        umlal v15.4s, v28.4h, v1.h[1]
        umlal2 v14.4s, v8.8h, v1.h[1]
        umlal v15.4s, v9.4h, v1.h[1]
108:    umlal v14.4s, v28.4h, v1.h[0]
        umlal2 v15.4s, v28.8h, v1.h[0]
        umlal v14.4s, v8.4h, v1.h[0]
        umlal2 v15.4s, v8.8h, v1.h[0]
107:    umlal2 v14.4s, v28.8h, v0.h[7]
        umlal v15.4s, v29.4h, v0.h[7]
        umlal2 v14.4s, v7.8h, v0.h[7]
        umlal v15.4s, v8.4h, v0.h[7]
106:    umlal v14.4s, v29.4h, v0.h[6]
        umlal2 v15.4s, v29.8h, v0.h[6]
        umlal v14.4s, v7.4h, v0.h[6]
        umlal2 v15.4s, v7.8h, v0.h[6]
105:    umlal2 v14.4s, v29.8h, v0.h[5]
        umlal v15.4s, v30.4h, v0.h[5]
        umlal2 v14.4s, v6.8h, v0.h[5]
        umlal v15.4s, v7.4h, v0.h[5]
104:    umlal v14.4s, v30.4h, v0.h[4]
        umlal2 v15.4s, v30.8h, v0.h[4]
        umlal v14.4s, v6.4h, v0.h[4]
        umlal2 v15.4s, v6.8h, v0.h[4]
103:    umlal2 v14.4s, v30.8h, v0.h[3]
        umlal v15.4s, v31.4h, v0.h[3]
        umlal2 v14.4s, v5.8h, v0.h[3]
        umlal v15.4s, v6.4h, v0.h[3]
102:    umlal v14.4s, v31.4h, v0.h[2]
        umlal2 v15.4s, v31.8h, v0.h[2]
        umlal v14.4s, v5.4h, v0.h[2]
        umlal2 v15.4s, v5.8h, v0.h[2]
101:    umlal2 v14.4s, v31.8h, v0.h[1]
        umlal v15.4s, v4.4h, v0.h[1]
        umlal2 v14.4s, v4.8h, v0.h[1]
        umlal v15.4s, v5.4h, v0.h[1]
        uqrshrn v14.4h, v14.4s, #16
        uqrshrn2 v14.8h, v15.4s, #16
        uqrshrn v15.8b, v14.8h, #FRACTION_BITS
        mov v26.16b, v27.16b
        mov v27.16b, v28.16b
        mov v28.16b, v29.16b
        mov v29.16b, v30.16b
        mov v30.16b, v31.16b
        mov v31.16b, v4.16b
        mov v4.16b, v5.16b
        mov v5.16b, v6.16b
        mov v6.16b, v7.16b
        mov v7.16b, v8.16b
        mov v8.16b, v9.16b
        mov v9.16b, v10.16b
        mov v10.16b, v11.16b
.endm/*}}}*/

.macro hconv4_20/*{{{*/
.rodata
200:    .hword -4
        .hword 101f-100f
        .hword 102f-100f
        .hword 103f-100f
        .hword 104f-100f
        .hword 105f-100f
        .hword 106f-100f
        .hword 107f-100f
        .hword 108f-100f
        .hword 109f-100f
        .hword 110f-100f
        .hword 111f-100f
        .hword 112f-100f
        .hword 113f-100f
        .hword 114f-100f
        .hword 115f-100f
        .hword 116f-100f
        .hword 117f-100f
        .hword 118f-100f
        .hword 119f-100f
        .hword 120f-100f
.align 4
.text
        umull v14.4s, v28.4h, v0.h[0]
        umull2 v15.4s, v28.8h, v0.h[0]
        adrp x16, 200b
        add x16, x16, :lo12:200b
        ldrsh x12, [x16, x5, LSL #1]
        adr x16, 100f
        add x12, x12, x16
100:    br x12
120:    umlal v14.4s, v18.4h, v2.h[4]
        umlal2 v15.4s, v18.8h, v2.h[4]
        umlal v14.4s, v10.4h, v2.h[4]
        umlal2 v15.4s, v10.8h, v2.h[4]
119:    umlal2 v14.4s, v18.8h, v2.h[3]
        umlal v15.4s, v19.4h, v2.h[3]
        umlal2 v14.4s, v9.8h, v2.h[3]
        umlal v15.4s, v10.4h, v2.h[3]
118:    umlal v14.4s, v19.4h, v2.h[2]
        umlal2 v15.4s, v19.8h, v2.h[2]
        umlal v14.4s, v9.4h, v2.h[2]
        umlal2 v15.4s, v9.8h, v2.h[2]
117:    umlal2 v14.4s, v19.8h, v2.h[1]
        umlal v15.4s, v20.4h, v2.h[1]
        umlal2 v14.4s, v8.8h, v2.h[1]
        umlal v15.4s, v9.4h, v2.h[1]
116:    umlal v14.4s, v20.4h, v2.h[0]
        umlal2 v15.4s, v20.8h, v2.h[0]
        umlal v14.4s, v8.4h, v2.h[0]
        umlal2 v15.4s, v8.8h, v2.h[0]
115:    umlal2 v14.4s, v20.8h, v1.h[7]
        umlal v15.4s, v21.4h, v1.h[7]
        umlal2 v14.4s, v7.8h, v1.h[7]
        umlal v15.4s, v8.4h, v1.h[7]
114:    umlal v14.4s, v21.4h, v1.h[6]
        umlal2 v15.4s, v21.8h, v1.h[6]
        umlal v14.4s, v7.4h, v1.h[6]
        umlal2 v15.4s, v7.8h, v1.h[6]
113:    umlal2 v14.4s, v21.8h, v1.h[5]
        umlal v15.4s, v22.4h, v1.h[5]
        umlal2 v14.4s, v6.8h, v1.h[5]
        umlal v15.4s, v7.4h, v1.h[5]
112:    umlal v14.4s, v22.4h, v1.h[4]
        umlal2 v15.4s, v22.8h, v1.h[4]
        umlal v14.4s, v6.4h, v1.h[4]
        umlal2 v15.4s, v6.8h, v1.h[4]
111:    umlal2 v14.4s, v22.8h, v1.h[3]
        umlal v15.4s, v23.4h, v1.h[3]
        umlal2 v14.4s, v5.8h, v1.h[3]
        umlal v15.4s, v6.4h, v1.h[3]
110:    umlal v14.4s, v23.4h, v1.h[2]
        umlal2 v15.4s, v23.8h, v1.h[2]
        umlal v14.4s, v5.4h, v1.h[2]
        umlal2 v15.4s, v5.8h, v1.h[2]
109:    umlal2 v14.4s, v23.8h, v1.h[1]
        umlal v15.4s, v24.4h, v1.h[1]
        umlal2 v14.4s, v4.8h, v1.h[1]
        umlal v15.4s, v5.4h, v1.h[1]
108:    umlal v14.4s, v24.4h, v1.h[0]
        umlal2 v15.4s, v24.8h, v1.h[0]
        umlal v14.4s, v4.4h, v1.h[0]
        umlal2 v15.4s, v4.8h, v1.h[0]
107:    umlal2 v14.4s, v24.8h, v0.h[7]
        umlal v15.4s, v25.4h, v0.h[7]
        umlal2 v14.4s, v31.8h, v0.h[7]
        umlal v15.4s, v4.4h, v0.h[7]
106:    umlal v14.4s, v25.4h, v0.h[6]
        umlal2 v15.4s, v25.8h, v0.h[6]
        umlal v14.4s, v31.4h, v0.h[6]
        umlal2 v15.4s, v31.8h, v0.h[6]
105:    umlal2 v14.4s, v25.8h, v0.h[5]
        umlal v15.4s, v26.4h, v0.h[5]
        umlal2 v14.4s, v30.8h, v0.h[5]
        umlal v15.4s, v31.4h, v0.h[5]
104:    umlal v14.4s, v26.4h, v0.h[4]
        umlal2 v15.4s, v26.8h, v0.h[4]
        umlal v14.4s, v30.4h, v0.h[4]
        umlal2 v15.4s, v30.8h, v0.h[4]
103:    umlal2 v14.4s, v26.8h, v0.h[3]
        umlal v15.4s, v27.4h, v0.h[3]
        umlal2 v14.4s, v29.8h, v0.h[3]
        umlal v15.4s, v30.4h, v0.h[3]
102:    umlal v14.4s, v27.4h, v0.h[2]
        umlal2 v15.4s, v27.8h, v0.h[2]
        umlal v14.4s, v29.4h, v0.h[2]
        umlal2 v15.4s, v29.8h, v0.h[2]
101:    umlal2 v14.4s, v27.8h, v0.h[1]
        umlal v15.4s, v28.4h, v0.h[1]
        umlal2 v14.4s, v28.8h, v0.h[1]
        umlal v15.4s, v29.4h, v0.h[1]
        uqrshrn v14.4h, v14.4s, #16
        uqrshrn2 v14.8h, v15.4s, #16
        uqrshrn v15.8b, v14.8h, #FRACTION_BITS
        mov v18.16b, v19.16b
        mov v19.16b, v20.16b
        mov v20.16b, v21.16b
        mov v21.16b, v22.16b
        mov v22.16b, v23.16b
        mov v23.16b, v24.16b
        mov v24.16b, v25.16b
        mov v25.16b, v26.16b
        mov v26.16b, v27.16b
        mov v27.16b, v28.16b
        mov v28.16b, v29.16b
        mov v29.16b, v30.16b
        mov v30.16b, v31.16b
        mov v31.16b, v4.16b
        mov v4.16b, v5.16b
        mov v5.16b, v6.16b
        mov v6.16b, v7.16b
        mov v7.16b, v8.16b
        mov v8.16b, v9.16b
        mov v9.16b, v10.16b
        mov v10.16b, v11.16b
.endm/*}}}*/
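
/* Not in the original: hconv4_25 below keeps the part of the window that no
 * longer fits in registers in a 64-byte circular spill buffer at [x9]. The
 * wrap is a single `bic` of address bit 6, which assumes the buffer base has
 * that bit clear (e.g. a 128-byte-aligned allocation). A C model:
 *
 *   static inline uint16_t *ring_wrap(uint16_t *p) {
 *       return (uint16_t *)((uintptr_t)p & ~(uintptr_t)0x40);  // bic #0x40
 *   }
 *   // reading a tap 8 bytes into the ring, cf. `add x12, x9, #0x08` below:
 *   //   uint16_t *q = ring_wrap(cursor + 4);  // then load 4 halfwords at q
 */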
.macro hconv4_25/*{{{*/
.rodata
200:    .hword -4
        .hword 101f-100f
        .hword 102f-100f
        .hword 103f-100f
        .hword 104f-100f
        .hword 105f-100f
        .hword 106f-100f
        .hword 107f-100f
        .hword 108f-100f
        .hword 109f-100f
        .hword 110f-100f
        .hword 111f-100f
        .hword 112f-100f
        .hword 113f-100f
        .hword 114f-100f
        .hword 115f-100f
        .hword 116f-100f
        .hword 117f-100f
        .hword 118f-100f
        .hword 119f-100f
        .hword 120f-100f
        .hword 121f-100f
        .hword 122f-100f
        .hword 123f-100f
        .hword 124f-100f
        .hword 125f-100f
.align 4
.text
        umull2 v14.4s, v25.8h, v0.h[0]
        umull v15.4s, v26.4h, v0.h[0]
        adrp x16, 200b
        add x16, x16, :lo12:200b
        ldrsh x12, [x16, x5, LSL #1]
        adr x16, 100f
        add x12, x12, x16
100:    br x12
125:    ld1 {v12.8h}, [x9]
        umlal v14.4s, v12.4h, v3.h[1]
        umlal2 v15.4s, v12.8h, v3.h[1]
        umlal v14.4s, v10.4h, v3.h[1]
        umlal2 v15.4s, v10.8h, v3.h[1]
124:    add x12, x9, #0x08
        bic x12, x12, #0x40
        ld1 {v12.4h}, [x12], #8
        bic x12, x12, #0x40
        ld1 {v13.4h}, [x12]
        umlal v14.4s, v12.4h, v3.h[0]
        umlal v15.4s, v13.4h, v3.h[0]
        umlal2 v14.4s, v9.8h, v3.h[0]
        umlal v15.4s, v10.4h, v3.h[0]
123:    add x12, x9, #0x10
        bic x12, x12, #0x40
        ld1 {v12.8h}, [x12]
        umlal v14.4s, v12.4h, v2.h[7]
        umlal2 v15.4s, v12.8h, v2.h[7]
        umlal v14.4s, v9.4h, v2.h[7]
        umlal2 v15.4s, v9.8h, v2.h[7]
122:    add x12, x9, #0x18
        bic x12, x12, #0x40
        ld1 {v12.4h}, [x12], #8
        bic x12, x12, #0x40
        ld1 {v13.4h}, [x12]
        umlal v14.4s, v12.4h, v2.h[6]
        umlal v15.4s, v13.4h, v2.h[6]
        umlal2 v14.4s, v8.8h, v2.h[6]
        umlal v15.4s, v9.4h, v2.h[6]
121:    add x12, x9, #0x20
        bic x12, x12, #0x40
        ld1 {v12.8h}, [x12]
        umlal v14.4s, v12.4h, v2.h[5]
        umlal2 v15.4s, v12.8h, v2.h[5]
        umlal v14.4s, v8.4h, v2.h[5]
        umlal2 v15.4s, v8.8h, v2.h[5]
120:    add x12, x9, #0x28
        bic x12, x12, #0x40
        ld1 {v12.4h}, [x12], #8
        bic x12, x12, #0x40
        ld1 {v13.4h}, [x12]
        umlal v14.4s, v12.4h, v2.h[4]
        umlal v15.4s, v13.4h, v2.h[4]
        umlal2 v14.4s, v7.8h, v2.h[4]
        umlal v15.4s, v8.4h, v2.h[4]
119:    add x12, x9, #0x30
        bic x12, x12, #0x40
        ld1 {v12.8h}, [x12]
        umlal v14.4s, v12.4h, v2.h[3]
        umlal2 v15.4s, v12.8h, v2.h[3]
        umlal v14.4s, v7.4h, v2.h[3]
        umlal2 v15.4s, v7.8h, v2.h[3]
118:    add x12, x9, #0x38
        bic x12, x12, #0x40
        ld1 {v12.4h}, [x12]
        umlal v14.4s, v12.4h, v2.h[2]
        umlal v15.4s, v17.4h, v2.h[2]
        umlal2 v14.4s, v6.8h, v2.h[2]
        umlal v15.4s, v7.4h, v2.h[2]
117:    umlal v14.4s, v17.4h, v2.h[1]
        umlal2 v15.4s, v17.8h, v2.h[1]
        umlal v14.4s, v6.4h, v2.h[1]
        umlal2 v15.4s, v6.8h, v2.h[1]
116:    umlal2 v14.4s, v17.8h, v2.h[0]
        umlal v15.4s, v18.4h, v2.h[0]
        umlal2 v14.4s, v5.8h, v2.h[0]
        umlal v15.4s, v6.4h, v2.h[0]
115:    umlal v14.4s, v18.4h, v1.h[7]
        umlal2 v15.4s, v18.8h, v1.h[7]
        umlal v14.4s, v5.4h, v1.h[7]
        umlal2 v15.4s, v5.8h, v1.h[7]
114:    umlal2 v14.4s, v18.8h, v1.h[6]
        umlal v15.4s, v19.4h, v1.h[6]
        umlal2 v14.4s, v4.8h, v1.h[6]
        umlal v15.4s, v5.4h, v1.h[6]
113:    umlal v14.4s, v19.4h, v1.h[5]
        umlal2 v15.4s, v19.8h, v1.h[5]
        umlal v14.4s, v4.4h, v1.h[5]
        umlal2 v15.4s, v4.8h, v1.h[5]
112:    umlal2 v14.4s, v19.8h, v1.h[4]
        umlal v15.4s, v20.4h, v1.h[4]
        umlal2 v14.4s, v31.8h, v1.h[4]
        umlal v15.4s, v4.4h, v1.h[4]
111:    umlal v14.4s, v20.4h, v1.h[3]
        umlal2 v15.4s, v20.8h, v1.h[3]
        umlal v14.4s, v31.4h, v1.h[3]
        umlal2 v15.4s, v31.8h, v1.h[3]
110:    umlal2 v14.4s, v20.8h, v1.h[2]
        umlal v15.4s, v21.4h, v1.h[2]
        umlal2 v14.4s, v30.8h, v1.h[2]
        umlal v15.4s, v31.4h, v1.h[2]
109:    umlal v14.4s, v21.4h, v1.h[1]
        umlal2 v15.4s, v21.8h, v1.h[1]
        umlal v14.4s, v30.4h, v1.h[1]
        umlal2 v15.4s, v30.8h, v1.h[1]
108:    umlal2 v14.4s, v21.8h, v1.h[0]
        umlal v15.4s, v22.4h, v1.h[0]
        umlal2 v14.4s, v29.8h, v1.h[0]
        umlal v15.4s, v30.4h, v1.h[0]
107:    umlal v14.4s, v22.4h, v0.h[7]
        umlal2 v15.4s, v22.8h, v0.h[7]
        umlal v14.4s, v29.4h, v0.h[7]
        umlal2 v15.4s, v29.8h, v0.h[7]
106:    umlal2 v14.4s, v22.8h, v0.h[6]
        umlal v15.4s, v23.4h, v0.h[6]
        umlal2 v14.4s, v28.8h, v0.h[6]
        umlal v15.4s, v29.4h, v0.h[6]
105:    umlal v14.4s, v23.4h, v0.h[5]
        umlal2 v15.4s, v23.8h, v0.h[5]
        umlal v14.4s, v28.4h, v0.h[5]
        umlal2 v15.4s, v28.8h, v0.h[5]
104:    umlal2 v14.4s, v23.8h, v0.h[4]
        umlal v15.4s, v24.4h, v0.h[4]
        umlal2 v14.4s, v27.8h, v0.h[4]
        umlal v15.4s, v28.4h, v0.h[4]
103:    umlal v14.4s, v24.4h, v0.h[3]
        umlal2 v15.4s, v24.8h, v0.h[3]
        umlal v14.4s, v27.4h, v0.h[3]
        umlal2 v15.4s, v27.8h, v0.h[3]
102:    umlal2 v14.4s, v24.8h, v0.h[2]
        umlal v15.4s, v25.4h, v0.h[2]
        umlal2 v14.4s, v26.8h, v0.h[2]
        umlal v15.4s, v27.4h, v0.h[2]
101:    umlal v14.4s, v25.4h, v0.h[1]
        umlal2 v15.4s, v25.8h, v0.h[1]
        umlal v14.4s, v26.4h, v0.h[1]
        umlal2 v15.4s, v26.8h, v0.h[1]
        uqrshrn v14.4h, v14.4s, #16
        uqrshrn2 v14.8h, v15.4s, #16
        uqrshrn v15.8b, v14.8h, #FRACTION_BITS
        st1 {v17.16b}, [x9], #16
        bic x9, x9, #0x40
        mov v17.16b, v18.16b
        mov v18.16b, v19.16b
        mov v19.16b, v20.16b
        mov v20.16b, v21.16b
        mov v21.16b, v22.16b
        mov v22.16b, v23.16b
        mov v23.16b, v24.16b
        mov v24.16b, v25.16b
        mov v25.16b, v26.16b
        mov v26.16b, v27.16b
        mov v27.16b, v28.16b
        mov v28.16b, v29.16b
        mov v29.16b, v30.16b
        mov v30.16b, v31.16b
        mov v31.16b, v4.16b
        mov v4.16b, v5.16b
        mov v5.16b, v6.16b
        mov v6.16b, v7.16b
        mov v7.16b, v8.16b
        mov v8.16b, v9.16b
        mov v9.16b, v10.16b
        mov v10.16b, v11.16b
.endm/*}}}*/

/* Dedicated function wrapper for the fetch macro, for the cases where
 * performance isn't that important, to keep code size down.
 */
PRIVATE(fetch_generic_asm)
        stp x10, x11, [sp, #-16]!
        fetch
        ldp x10, x11, [sp], #16
        ret
END(fetch_generic_asm)

/* Fetch the next (16 - (x10 & 15)) columns of data, avoiding reading memory
 * beyond that limit, and filling the rest of the vector with the last legal
 * pixel.
 * Result is in v10 and v11. v8 and v9 are filled with the first legal pixel.
 * Note: This function can read beyond the right edge of input if the image is
 * narrower than 16 bytes.
 */
PRIVATE(fetch_clampleft1)
        stp x29, x30, [sp, #-16]!
        bl fetch_generic_asm
        dup v8.8h, v10.h[0]
        dup v9.8h, v10.h[0]
        ands x12, x10, #15
        beq 1f
        sub x1, x1, x12
        sub x15, x15, x12
        sub x19, x19, x12
        sub x10, x10, x12
        sub x12, sp, x12, LSL #1
        sub sp, sp, #64
        sub x12, x12, #32
        st1 {v8.8h, v9.8h, v10.8h,v11.8h}, [sp]
        ld1 {v10.8h,v11.8h}, [x12]
        add sp, sp, #64
1:      ldp x29, x30, [sp], #16
        ret
END(fetch_clampleft1)

PRIVATE(fetch_clampleft4)
        stp x29, x30, [sp, #-16]!
        bl fetch_generic_asm
        dup v8.2d, v10.d[0]
        dup v9.2d, v10.d[0]
        ands x12, x10, #15
        beq 1f
        sub x1, x1, x12
        sub x15, x15, x12
        sub x19, x19, x12
        sub x10, x10, x12
        sub x12, sp, x12, LSL #1
        sub sp, sp, #64
        sub x12, x12, #32
        st1 {v8.8h, v9.8h, v10.8h,v11.8h}, [sp]
        ld1 {v10.8h,v11.8h}, [x12]
        add sp, sp, #64
1:      ldp x29, x30, [sp], #16
        ret
END(fetch_clampleft4)

/* Fetch only the next (x11 & 15) (where 0 means 16) columns of data, avoiding
 * reading memory beyond that limit, and filling the rest of the vector with
 * the last legal pixel.
 * Result is in v10 and v11. v12 and v13 are filled with the last legal pixel.
 * Note: This function can read beyond the left edge of input if the image is
 * narrower than 16 bytes.
 */
PRIVATE(fetch_clampright1)
        stp x29, x30, [sp, #-16]!
        sub x12, xzr, x11
        ands x12, x12, #15
        beq 1f
        sub x1, x1, x12
        sub x15, x15, x12
        sub x19, x19, x12
        bl fetch_generic_asm
        dup v12.8h, v11.h[7]
        dup v13.8h, v11.h[7]
        sub x12, xzr, x11
        and x12, x12, #15
        sub sp, sp, #64
        add x12, sp, x12, LSL #1
        st1 {v10.8h,v11.8h,v12.8h,v13.8h}, [sp]
        ld1 {v10.8h,v11.8h}, [x12]
        add sp, sp, #64
        ldp x29, x30, [sp], #16
        ret
1:      bl fetch_generic_asm
        dup v12.8h, v11.h[7]
        dup v13.8h, v11.h[7]
        ldp x29, x30, [sp], #16
        ret
END(fetch_clampright1)

PRIVATE(fetch_clampright4)
        stp x29, x30, [sp, #-16]!
        sub x12, xzr, x11
        ands x12, x12, #15
        beq 1f
        sub x1, x1, x12
        sub x15, x15, x12
        sub x19, x19, x12
        bl fetch_generic_asm
        dup v12.2d, v11.d[1]
        dup v13.2d, v11.d[1]
        sub x12, xzr, x11
        and x12, x12, #15
        sub sp, sp, #64
        add x12, sp, x12, LSL #1
        st1 {v10.8h,v11.8h,v12.8h,v13.8h}, [sp]
        ld1 {v10.8h,v11.8h}, [x12]
        add sp, sp, #64
        ldp x29, x30, [sp], #16
        ret
1:      bl fetch_generic_asm
        dup v12.2d, v11.d[1]
        dup v13.2d, v11.d[1]
        ldp x29, x30, [sp], #16
        ret
END(fetch_clampright4)

/* Given values in v10 and v11, and an index in x11, sweep the (x11 & 15)th
 * value across to fill the rest of the register pair. Used for filling the
 * right hand edge of the window when reading too close to the right hand edge
 * of the image.
 * Also returns a dup-ed copy of the last element in v12 for the tail-fill
 * case (this happens incidentally in the common path, but must be done
 * deliberately in the fast-out path).
 */
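/* Not in the original: a C model of the stack shuffle below, with n = x11 & 15
 * (n != 0 path) and a 64-byte scratch frame viewed as uint16_t elements; the
 * names are illustrative only:
 *
 *   uint16_t buf[32];
 *   memcpy(buf, vwin, 32);                 // st1 {v10.8h,v11.8h}, [sp]
 *   uint16_t last = buf[n - 1];            // ld1r broadcast of element n-1
 *   for (int i = n - 1; i < n + 15; i++)   // st1 {v12.8h,v13.8h} at [x12]
 *       buf[i] = last;
 *   memcpy(vwin, buf, 32);                 // ld1 {v10.8h,v11.8h}, [sp]
 *   // v12/v13 are left holding the broadcast padding value for the caller
 */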
PRIVATE(prefill_sweepright1)
        ands x12, x11, #15
        beq 1f
        sub x12, x12, #1
        sub sp, sp, #64
        st1 {v10.8h,v11.8h}, [sp]
        add x12, sp, x12, LSL #1
        ld1r {v12.8h}, [x12]
        ld1r {v13.8h}, [x12]
        st1 {v12.8h,v13.8h}, [x12]
        ld1 {v10.8h,v11.8h}, [sp]
        add sp, sp, #64
        ret
1:      dup v12.8h, v11.h[7]
        dup v13.8h, v11.h[7]
        ret
END(prefill_sweepright1)

PRIVATE(prefill_sweepright4)
        ands x12, x11, #15
        beq 1f
        sub x12, x12, #4
        sub sp, sp, #64
        st1 {v10.8h,v11.8h}, [sp]
        add x12, sp, x12, LSL #1
        ld1r {v12.2d}, [x12]
        ld1r {v13.2d}, [x12]
        st1 {v12.8h,v13.8h}, [x12]
        ld1 {v10.8h,v11.8h}, [sp]
        add sp, sp, #64
        ret
1:      dup v12.2d, v11.d[1]
        dup v13.2d, v11.d[1]
        ret
END(prefill_sweepright4)

/* The main loop keeps a sliding window of data that has already been convolved
 * in the vertical axis for the current line. This usually stays in the
 * register file, but spills to memory for large windows. The first thing that
 * needs to be done at start-up is to fill this window with image data, taking
 * into account the padding needed if the left or right edges of the image fall
 * within this window.
 */

/* Because the window is in the register file writes to it cannot be indexed
 * by another register. Consequently the fill loops are unrolled to address
 * the registers directly. This macro distinguishes between writes to the
 * register file and writes to the spill buffer (indicated by a destination
 * register named xx).
 */
.macro prefill_out ra, rb, sra, srb
.ifc \ra,xx
.ifc \rb,xx
        st1 {\sra,\srb}, [x9], #32
.else
        bic x9, x9, #0x40
        st1 {\sra}, [x9], #16
        mov \rb, \srb
.endif
.else
.ifnc \ra,\sra
        mov \ra, \sra
.endif
.ifnc \rb,\srb
        mov \rb, \srb
.endif
.endif
.endm
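
/* Not in the original: two illustrative expansions of prefill_out. With
 * register destinations it reduces to plain moves; with the `xx` marker it
 * becomes a store through the spill-buffer cursor:
 *
 *   prefill_out v18.16b, v19.16b, v10.16b, v11.16b
 *       // -> mov v18.16b, v10.16b
 *       //    mov v19.16b, v11.16b
 *   prefill_out xx, xx, v10.16b, v11.16b
 *       // -> st1 {v10.16b,v11.16b}, [x9], #32
 */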
  1320. /* This macro provides the list of registers representing the window, and the
  1321. * cases where the register file is too small and a spill buffer is used
  1322. * instead.
  1323. * Since several specialisations of each function are generated, this also
  1324. * culls superfluous iterations, and sets the variable `i` for subsequent
  1325. * macros indicating the current index into the window.
  1326. */
  1327. .macro prefill_list, macro, nextmacro, max_r, step, label
  1328. .macro ifneeded macro, nextmacro, line, nextline, ra, rb, step, label
  1329. .if windowsize >= (\line * 16)
  1330. .set i, windowsize - (\line * 16)
  1331. \label\macro\line:
  1332. prefill_\macro \label\nextmacro\line, \label\nextmacro\nextline, \ra, \rb, \step
  1333. .endif
  1334. .endm
  1335. ifneeded \macro \nextmacro, 13, 12, xx, xx, \step, \label
  1336. ifneeded \macro \nextmacro, 12, 11, xx, xx, \step, \label
  1337. ifneeded \macro \nextmacro, 11, 10, xx, v17.16b, \step, \label
  1338. ifneeded \macro \nextmacro, 10, 9, v18.16b, v19.16b, \step, \label
  1339. ifneeded \macro \nextmacro, 9, 8, v20.16b, v21.16b, \step, \label
  1340. ifneeded \macro \nextmacro, 8, 7, v22.16b, v23.16b, \step, \label
  1341. ifneeded \macro \nextmacro, 7, 6, v24.16b, v25.16b, \step, \label
  1342. ifneeded \macro \nextmacro, 6, 5, v26.16b, v27.16b, \step, \label
  1343. ifneeded \macro \nextmacro, 5, 4, v28.16b, v29.16b, \step, \label
  1344. ifneeded \macro \nextmacro, 4, 3, v30.16b, v31.16b, \step, \label
  1345. ifneeded \macro \nextmacro, 3, 2, v4.16b, v5.16b, \step, \label
  1346. ifneeded \macro \nextmacro, 2, 1, v6.16b, v7.16b, \step, \label
  1347. ifneeded \macro \nextmacro, 1, 0, v8.16b, v9.16b, \step, \label
  1348. \label\macro\()0:
  1349. b \label\()_end
  1350. .purgem ifneeded
  1351. .endm
  1352. /* These macros represent the possible stages of filling the window.
  1353. * Each macro is unrolled enough times that it can fill the entire window
  1354. * itself, but normally it will have to hand control to subsequent macros
  1355. * part-way through and this is done using labels named \next and \after, where
  1356. * \next is the next macro starting at the same window position and \after is
  1357. * the next macro starting after the current window position.
  1358. */
  1359. /* leftfill: v8 and v9 contain the left padding value. While the window
  1360. * extends outside of the image on the left-hand side, and at least 16 more
  1361. * padding values are needed in the window, store v8 and v9 into the window.
  1362. * Otherwise skip forward to storing image data.
  1363. */
  1364. .macro prefill_leftfill, next, after, ra, rb, step
  1365. cmp x10, #i+16
  1366. blo \next
  1367. prefill_out \ra, \rb, v8.16b, v9.16b
  1368. .endm
  1369. /* leftedge: The very first non-fill or partial-fill chunk from the image is
  1370. * already loaded (as it was used to calculate the left padding value), so
  1371. * store it here, and then drop into the regular load/store cycle in the next
  1372. * macro.
  1373. */
  1374. .macro prefill_leftedge, next, after, ra, rb, step
  1375. 1: prefill_out \ra, \rb, v10.16b, v11.16b
  1376. b \after
  1377. .endm
  1378. /* dofetch: Copy chunks of the image into the window without any complications
  1379. * from edge conditions.
  1380. */
  1381. .macro prefill_dofetch, next, after, ra, rb, step
  1382. cmp x11, #i+16
  1383. bls \next
  1384. bl fetch_generic_asm
  1385. prefill_out \ra, \rb, v10.16b, v11.16b
  1386. .endm

/* rightedge: The last fetch (currently in v10 and v11) may have gone beyond
 * the right-hand edge of the image.  In that case sweep the last valid pixel
 * across the rest of the chunk, and in either case prepare padding data in
 * v12 and v13 for the next macro.  This is done in fetch_clampright.
 * This only happens once before going on to the next macro.
 * Sometimes leftedge also covers the rightedge case, in which case this has
 * to be skipped altogether.
 */
.macro prefill_rightedge, next, after, ra, rb, step
            cmp         x11, #i
            bls         \next
            bl          fetch_clampright\step
            prefill_out \ra, \rb, v10.16b, v11.16b
            b           \after
.endm

/* rightfill: The rest of the window is simply filled with right padding from
 * v12 and v13.
 */
.macro prefill_rightfill, next, after, ra, rb, step
            prefill_out \ra, \rb, v12.16b, v13.16b
.endm

/* Here all of the macros above are unrolled and laid out in the proper order.
 */
.macro prefill_body, max_r, step, label
            prefill_list leftfill,  leftedge,  \max_r, \step, \label
            prefill_list leftedge,  dofetch,   \max_r, \step, \label
            prefill_list dofetch,   rightedge, \max_r, \step, \label
            prefill_list rightedge, rightfill, \max_r, \step, \label
            prefill_list rightfill, oops,      \max_r, \step, \label
\label\()_end:
.endm

/* Fill the convolution window with context data.  The aim here is to load
 * exactly 2*r columns, and in the main loop to read as many columns as will
 * be written.  This is complicated by the window being divided into chunks
 * at register boundaries, by the need to handle cases where the input starts
 * very close to the left or right (or both) edges of the image, and by the
 * need to fill the resulting gaps with left and right edge padding values.
 *
 * Input:
 *      x1 -- src
 *      x2 -- pitch
 *      x3 -- count
 *      x4 -- available image data right of src pointer
 *      x5 -- r
 *      x6 -- rup
 *      x7 -- rdn
 *      x8 -- available image data left of src pointer
 *      x9 -- buffer (if needed)
 *      x13 -- -pitch
 *      x15 -- top-row in
 *      x19 -- bottom-row in
 * Output:
 *      x4 -= min(inlen, count + windowsize - centertap)
 *      x1 += min(inlen, count + windowsize - centertap)
 *      x15 += min(inlen, count + windowsize - centertap)
 *      x19 += min(inlen, count + windowsize - centertap)
 * Modifies:
 *      x10 -- fill start index in the window
 *      x11 -- fill stop index in the window
 *      x12 -- scratch
 */
.macro prefill step=1, max_r=25, label=xx
  .set windowsize, (((\max_r + \max_r) * \step + 15) & ~15)
  .set centertap, (windowsize - \max_r * \step)
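            /* (Worked example: for step=1, max_r=25 these evaluate to
             * windowsize = ((25 + 25) * 1 + 15) & ~15 = 64 and
             * centertap = 64 - 25 = 39; for step=4, max_r=3 they give
             * windowsize = ((3 + 3) * 4 + 15) & ~15 = 32 and
             * centertap = 32 - 12 = 20.)
             */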
            mov         x10, #centertap
            subs        x10, x10, x8
            csel        x10, xzr, x10, lo

            subs        x11, x4, #windowsize - centertap
            csel        x11, xzr, x11, hs
            add         x11, x11, #windowsize

            /* x10 indicates where in the window legal image data begins.
             * x11 indicates where in the window legal image data ends.
             * When starting near the centre of a large image these would be
             * zero and windowsize respectively, but when starting near the
             * edges this can change.
             * When starting on the leftmost pixel, x10 will be centertap.
             * When starting on the rightmost pixel, x11 will be centertap+1.
             */

            /* x4 indicates how much data there is between the current pointers
             * and the right edge of the image.  The pointers currently point
             * to the data needed at centertap.  The subsequent code will
             * consume (windowsize - x10) data, but only the data from
             * centertap to windowsize comes out of x4's budget.
             */
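            /* (Equivalently: x4 = max(0, x4 - (windowsize - centertap)).)
             */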
1:          subs        x4, x4, #windowsize - centertap
            csel        x4, xzr, x4, lo

            /* And the pointers need to rewind to the start of the window.
             */
            sub         x1, x1, #centertap
            sub         x15, x15, #centertap
            sub         x19, x19, #centertap

            /* Unless x8 indicated that there wasn't that much data available.
             */
            add         x1, x1, x10
            add         x15, x15, x10
            add         x19, x19, x10

            /* Get the first chunk, and add padding to align it to the window
             * if necessary.
             */
            bl          fetch_clampleft\step

            /* Sometimes the start and the end of the window are in the same
             * chunk.  In that case both ends need filler at the outset.
             */
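            /* (The eor below is a same-chunk test: x10 and x11 - 1 fall in
             * the same aligned 16-byte chunk exactly when they differ only
             * in their low four bits, i.e. when x10 ^ (x11 - 1) < 16.  For
             * example, x10 = 39 and x11 = 41 gives 39 ^ 40 = 15, so both
             * ends of the window need filler immediately.)
             */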
            sub         x12, x11, #1
            eor         x12, x10, x12
            cmp         x12, #16
            bhs         1f
            bl          prefill_sweepright\step

            /* Iterate through all the points in the window and fill them in
             * with padding or image data as needed.
             */
1:          prefill_body \max_r, \step, \label
.endm

/* The main body of the convolve functions.  Having already pre-filled the
 * convolution window with 2*r input values, the logic settles into a regular
 * pattern of reading and writing at a 1:1 rate until either input or output
 * expires.  The input leads the output by r values, so when processing all
 * the way to the right-hand edge, or within r pixels of that edge, the input
 * will run out first.  In the case of very narrow images, or sub-windows
 * starting near the right edge, the input may already have run out while the
 * convolution window was being filled, and this loop will start with a
 * zero-length input.
 *
 * Once the input runs out, the rest of the output must be produced by padding
 * the remainder of the window with the padding value derived from the last
 * valid pixel of the source.
 *
 * Input:
 *      x0 = dst
 *      x1 = src
 *      x2 = pitch
 *      x3 = count
 *      x4 = inlen
 *      x5 = r
 *      x6 = rup
 *      x7 = rdn
 *      x9 = buffer
 *      x13 = -pitch
 *      x15 = top-row in
 *      x19 = bottom-row in
 * Modifies:
 *      x8 = fetch code pointer
 */
.macro conv_body core, step=1, max_r=25, labelc="", labelnc=""

            /* If x4 >= x3 then there's no need for clipping.  The main loop
             * needs to exit when either x3 or x4 runs out, so clamp x4 to be
             * no greater than x3 and use x4 for the loop.
             * However, if x4 comes out of the loop with less than 16 bytes
             * left, a partial read would be necessary to avoid reading beyond
             * the end of the image.  To avoid this, clamp x4 to the next
             * multiple of 16, which is still sufficient to force it out of
             * the loop but doesn't imply a rewind.
             */
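            /* (Worked example: with x3 = 20 and x4 = 100, x12 becomes
             * (20 + 15) & ~15 = 32, so x4 is clamped from 100 down to 32 and
             * the loop runs exactly two whole 16-byte iterations.)
             */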
            add         x12, x3, #15
            bic         x12, x12, #15
            cmp         x4, x12
            csel        x4, x12, x4, hi

            /* First calculate the entry-point into the internal fetch logic.
             * This is done so the same function can service several kernel
             * sizes.
             */
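            /* (Illustrative: the two shifted subtractions below give
             * x8 = \labelnc - r * 40, and the clamped variant further down
             * gives x8 = \labelc - r * 56; i.e. the entry point is found by
             * stepping back over a fixed amount of unrolled fetch code for
             * each step of radius.)
             */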
            adrp        x8, \labelnc
            add         x8, x8, #:lo12:\labelnc
            sub         x8, x8, x5, LSL #5
            sub         x8, x8, x5, LSL #3
            cmp         x5, x6
            ccmp        x5, x7, #0, eq
            beq         5f

            /* if (r != rup || r != rdn) then the address-clamping table should
             * be used rather than the short-cut version.
             */
            adrp        x8, \labelc
            add         x8, x8, #:lo12:\labelc
            sub         x8, x8, x5, LSL #6
            add         x8, x8, x5, LSL #3
            b           5f

            /* Main loop: ... */
  .align 4
3:          /* first perform a vertical convolution from memory to get the
             * next 16 taps of the horizontal window into the register
             * file...
             */
            fetch max_r=\max_r, labelc=\labelc, labelnc=\labelnc, reg=x8
            /* ...then perform a horizontal convolution on that window to
             * produce eight output bytes, and slide the window along.
             * This has to be done twice to match the 16-way vertical pass.
             * It would be preferable to have twice the work done in \core,
             * but that would demand yet another variant on those macros and
             * would perturb the register allocation severely.
             */
            \core
            st1         {v15.8b}, [x0], #8
            \core
            st1         {v15.8b}, [x0], #8

            sub         x3, x3, #16
5:          subs        x4, x4, #16
            bhi         3b

            /* Here there's 16 or fewer bytes available before the edge of the
             * source image.  x4 holds that count minus 16 (because it was
             * decremented before the first iteration ran).  The last read may
             * not be a whole chunk, and beyond that a fill value must be used.
             *
             * Of course, none of that matters if there's no more output to
             * produce...
             */
            cbz         x3, 5f

            /* Oh well. */
            adds        x4, x4, #16
            bne         1f
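            /* x4 + 16 == 0 means the input ran out exactly at a chunk
             * boundary, so no partial fetch is needed: pad the incoming
             * chunk entirely with copies of the last valid column already in
             * v9 (one 16-bit lane for step 1, one 4-lane pixel for step 4).
             */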
  .if \step==1
            dup         v10.8h, v9.h[7]
            dup         v11.8h, v9.h[7]
  .else
            dup         v10.2d, v9.d[1]
            dup         v11.2d, v9.d[1]
  .endif
            b           3f

            /* To avoid reading past the end of the input, rewind the pointers
             * by (16 - x4) to ensure that they're exactly 16 bytes from the
             * edge.
             */
1:          mov         x11, x4
            bl          fetch_clampright\step

            /* Now to put this padding to use, perform any remaining
             * iterations.  This is done at half the rate of the main loop,
             * because there's no longer pressure from a 16-lane window filler.
             */
3:          \core
  .if \step==1
            dup         v11.8h, v11.h[7]
  .else
            dup         v11.2d, v11.d[1]
  .endif
            subs        x3, x3, #8
            blo         4f
            st1         {v15.8b}, [x0], #8
            bne         3b
            b           5f

            /* If the final iteration contained 0 < l < 8 values, then perform
             * a piecewise store of the final vector.
             */
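            /* (Illustrative: for l = 5 (binary 101) this stores four bytes,
             * rotates the vector down by four, skips the two-byte store, and
             * finally stores one more byte.)
             */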
4:          tbz         x3, #2, 1f
            st1         {v15.s}[0], [x0], #4
            ext         v15.8b, v15.8b, v15.8b, #4
1:          tbz         x3, #1, 1f
            st1         {v15.h}[0], [x0], #2
            ext         v15.8b, v15.8b, v15.8b, #2
1:          tbz         x3, #0, 5f
            st1         {v15.b}[0], [x0], #1
            ext         v15.8b, v15.8b, v15.8b, #1
5:          mov         x0, #0
.endm

.irp r, TUNED_LIST1, 25
PRIVATE(convolve1_\r)
            stp         x29,x30, [sp, #-16]!
            prefill     step=1, max_r=\r, label=.Lcnv1_\r
            conv_body   core=hconv1_\r, step=1, max_r=\r, labelc=.Lcnv1_\r, labelnc=.Lcnvnc1_\r
            ldp         x29,x30, [sp], #16
            ret
END(convolve1_\r)
.endr

.irp r, TUNED_LIST4, 25
PRIVATE(convolve4_\r)
            sub         x9, sp, #0x40
            stp         x29,x30, [sp, #-(16 + 0x40 + 0x80)]!
            bic         x9, x9, #0x7f

            /* x9 now points to a 0x40 byte buffer on the stack whose address
             * has the low 7 bits clear.  This allows easy address calculation
             * in the wrap-around cases.
             */
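            /* (Illustrative: x9 = (sp - 0x40) & ~0x7f, computed against the
             * original sp, so the buffer is 128-byte aligned and lies wholly
             * within the 0x40 + 0x80 bytes reserved above the frame record
             * by the stp above.)
             */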
            prefill     step=4, max_r=\r, label=.Lcnv4_\r
            conv_body   core=hconv4_\r, step=4, max_r=\r, labelc=.Lcnv4_\r, labelnc=.Lcnvnc4_\r
            ldp         x29,x30, [sp], #(16 + 0x40 + 0x80)
            ret
END(convolve4_\r)
.endr

/* void rsdIntrinsicBlurU1_K(
 *                  void *out,      // x0
 *                  void *in,       // x1
 *                  size_t w,       // x2
 *                  size_t h,       // x3
 *                  size_t p,       // x4
 *                  size_t x,       // x5
 *                  size_t y,       // x6
 *                  size_t count,   // x7
 *                  size_t r,       // [sp]
 *                  uint16_t *tab); // [sp,#8]
 */
ENTRY(rsdIntrinsicBlurU1_K)
            stp         x19,x30, [sp, #-16]!
            sub         x8, sp, #32
            sub         sp, sp, #64
            st1         {v8.1d - v11.1d}, [sp]
            st1         {v12.1d - v15.1d}, [x8]
            mov         x8, x5              // x
            ldr         w5, [sp,#80]        // r
            sub         x9, x2, x8          // w - x
            sub         x10, x3, x6         // h - y
            mov         x2, x4              // pitch
            mov         x3, x7              // count
            sub         x7, x10, #1         // h - y - 1
            mov         x4, x9              // inlen = (w - x)
            ldr         x12, [sp, #88]      // tab
            add         x1, x1, x8          // src += x
            cmp         x6, x5
            csel        x6, x5, x6, hs      // rup = min(r, y)
            cmp         x7, x5
            csel        x7, x5, x7, hs      // rdn = min(r, h - y - 1)
            sub         x13, xzr, x2        // -pitch
            msub        x15, x2, x6, x1     // top-row in
            madd        x19, x2, x7, x1     // bottom-row in
            ld1         {v0.8h,v1.8h}, [x12], #32
            ld1         {v2.8h,v3.8h}, [x12], #32
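            /* Dispatch to the smallest specialised convolve routine that
             * covers this radius; x30 is pre-loaded so that the ret at the
             * end of the tail-called routine returns to the restore code at
             * 1: below.
             */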
            adr         x30, 1f
  .irp r, TUNED_LIST1
            cmp         x5, #\r
            bls         convolve1_\r
  .endr
            b           convolve1_25

1:          ld1         {v8.1d - v11.1d}, [sp], #32
            ld1         {v12.1d - v15.1d}, [sp], #32
            ldp         x19,x30, [sp], #16
            ret
END(rsdIntrinsicBlurU1_K)

/* void rsdIntrinsicBlurU4_K(
 *                  void *out,      // x0
 *                  void *in,       // x1
 *                  size_t w,       // x2
 *                  size_t h,       // x3
 *                  size_t p,       // x4
 *                  size_t x,       // x5
 *                  size_t y,       // x6
 *                  size_t count,   // x7
 *                  size_t r,       // [sp]
 *                  uint16_t *tab); // [sp,#8]
 */
ENTRY(rsdIntrinsicBlurU4_K)
            stp         x19,x30, [sp, #-16]!
            sub         x8, sp, #32
            sub         sp, sp, #64
            st1         {v8.1d - v11.1d}, [sp]
            st1         {v12.1d - v15.1d}, [x8]
            lsl         x8, x5, #2          // x (in bytes)
            lsl         x2, x2, #2          // w (in bytes)
            ldr         w5, [sp,#80]        // r
            sub         x9, x2, x8          // w - x
            sub         x10, x3, x6         // h - y
            mov         x2, x4              // pitch
            lsl         x3, x7, #2          // count (in bytes)
            sub         x7, x10, #1         // h - y - 1
            mov         x4, x9              // inlen = (w - x)
            ldr         x12, [sp, #88]      // tab
            add         x1, x1, x8          // in += x
            cmp         x6, x5
            csel        x6, x5, x6, hs      // rup = min(r, y)
            cmp         x7, x5
            csel        x7, x5, x7, hs      // rdn = min(r, h - y - 1)
            sub         x13, xzr, x2        // -pitch
            msub        x15, x2, x6, x1     // top-row in
            madd        x19, x2, x7, x1     // bottom-row in
            ld1         {v0.8h,v1.8h}, [x12], #32
            ld1         {v2.8h,v3.8h}, [x12], #32
            adr         x30, 1f
  .irp r, TUNED_LIST4
            cmp         x5, #\r
            bls         convolve4_\r
  .endr
            b           convolve4_25

1:          ld1         {v8.1d - v11.1d}, [sp], #32
            ld1         {v12.1d - v15.1d}, [sp], #32
            ldp         x19,x30, [sp], #16
            ret
END(rsdIntrinsicBlurU4_K)