am437x-vpfe.c 72 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723272427252726272727282729273027312732273327342735273627372738273927402741274227432744274527462747274827492750275127522753275427552756275727582759276027612762276327642765
  1. /*
  2. * TI VPFE capture Driver
  3. *
  4. * Copyright (C) 2013 - 2014 Texas Instruments, Inc.
  5. *
  6. * Benoit Parrot <[email protected]>
  7. * Lad, Prabhakar <[email protected]>
  8. *
  9. * This program is free software; you may redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; version 2 of the License.
  12. *
  13. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  14. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  15. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  16. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  17. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  18. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  19. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  20. * SOFTWARE.
  21. */
  22. #include <linux/delay.h>
  23. #include <linux/err.h>
  24. #include <linux/init.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/io.h>
  27. #include <linux/module.h>
  28. #include <linux/pinctrl/consumer.h>
  29. #include <linux/platform_device.h>
  30. #include <linux/pm_runtime.h>
  31. #include <linux/slab.h>
  32. #include <linux/uaccess.h>
  33. #include <linux/videodev2.h>
  34. #include <media/v4l2-common.h>
  35. #include <media/v4l2-ctrls.h>
  36. #include <media/v4l2-event.h>
  37. #include <media/v4l2-of.h>
  38. #include "am437x-vpfe.h"
  39. #define VPFE_MODULE_NAME "vpfe"
  40. #define VPFE_VERSION "0.1.0"
  41. static int debug;
  42. module_param(debug, int, 0644);
  43. MODULE_PARM_DESC(debug, "Debug level 0-8");
  44. #define vpfe_dbg(level, dev, fmt, arg...) \
  45. v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ##arg)
  46. #define vpfe_info(dev, fmt, arg...) \
  47. v4l2_info(&dev->v4l2_dev, fmt, ##arg)
  48. #define vpfe_err(dev, fmt, arg...) \
  49. v4l2_err(&dev->v4l2_dev, fmt, ##arg)
  50. /* standard information */
  51. struct vpfe_standard {
  52. v4l2_std_id std_id;
  53. unsigned int width;
  54. unsigned int height;
  55. struct v4l2_fract pixelaspect;
  56. int frame_format;
  57. };
  58. static const struct vpfe_standard vpfe_standards[] = {
  59. {V4L2_STD_525_60, 720, 480, {11, 10}, 1},
  60. {V4L2_STD_625_50, 720, 576, {54, 59}, 1},
  61. };
  62. struct bus_format {
  63. unsigned int width;
  64. unsigned int bpp;
  65. };
  66. /*
  67. * struct vpfe_fmt - VPFE media bus format information
  68. * @name: V4L2 format description
  69. * @code: V4L2 media bus format code
  70. * @shifted: V4L2 media bus format code for the same pixel layout but
  71. * shifted to be 8 bits per pixel. =0 if format is not shiftable.
  72. * @pixelformat: V4L2 pixel format FCC identifier
  73. * @width: Bits per pixel (when transferred over a bus)
  74. * @bpp: Bytes per pixel (when stored in memory)
  75. * @supported: Indicates format supported by subdev
  76. */
  77. struct vpfe_fmt {
  78. const char *name;
  79. u32 fourcc;
  80. u32 code;
  81. struct bus_format l;
  82. struct bus_format s;
  83. bool supported;
  84. u32 index;
  85. };
  86. static struct vpfe_fmt formats[] = {
  87. {
  88. .name = "YUV 4:2:2 packed, YCbYCr",
  89. .fourcc = V4L2_PIX_FMT_YUYV,
  90. .code = MEDIA_BUS_FMT_YUYV8_2X8,
  91. .l.width = 10,
  92. .l.bpp = 4,
  93. .s.width = 8,
  94. .s.bpp = 2,
  95. .supported = false,
  96. }, {
  97. .name = "YUV 4:2:2 packed, CbYCrY",
  98. .fourcc = V4L2_PIX_FMT_UYVY,
  99. .code = MEDIA_BUS_FMT_UYVY8_2X8,
  100. .l.width = 10,
  101. .l.bpp = 4,
  102. .s.width = 8,
  103. .s.bpp = 2,
  104. .supported = false,
  105. }, {
  106. .name = "YUV 4:2:2 packed, YCrYCb",
  107. .fourcc = V4L2_PIX_FMT_YVYU,
  108. .code = MEDIA_BUS_FMT_YVYU8_2X8,
  109. .l.width = 10,
  110. .l.bpp = 4,
  111. .s.width = 8,
  112. .s.bpp = 2,
  113. .supported = false,
  114. }, {
  115. .name = "YUV 4:2:2 packed, CrYCbY",
  116. .fourcc = V4L2_PIX_FMT_VYUY,
  117. .code = MEDIA_BUS_FMT_VYUY8_2X8,
  118. .l.width = 10,
  119. .l.bpp = 4,
  120. .s.width = 8,
  121. .s.bpp = 2,
  122. .supported = false,
  123. }, {
  124. .name = "RAW8 BGGR",
  125. .fourcc = V4L2_PIX_FMT_SBGGR8,
  126. .code = MEDIA_BUS_FMT_SBGGR8_1X8,
  127. .l.width = 10,
  128. .l.bpp = 2,
  129. .s.width = 8,
  130. .s.bpp = 1,
  131. .supported = false,
  132. }, {
  133. .name = "RAW8 GBRG",
  134. .fourcc = V4L2_PIX_FMT_SGBRG8,
  135. .code = MEDIA_BUS_FMT_SGBRG8_1X8,
  136. .l.width = 10,
  137. .l.bpp = 2,
  138. .s.width = 8,
  139. .s.bpp = 1,
  140. .supported = false,
  141. }, {
  142. .name = "RAW8 GRBG",
  143. .fourcc = V4L2_PIX_FMT_SGRBG8,
  144. .code = MEDIA_BUS_FMT_SGRBG8_1X8,
  145. .l.width = 10,
  146. .l.bpp = 2,
  147. .s.width = 8,
  148. .s.bpp = 1,
  149. .supported = false,
  150. }, {
  151. .name = "RAW8 RGGB",
  152. .fourcc = V4L2_PIX_FMT_SRGGB8,
  153. .code = MEDIA_BUS_FMT_SRGGB8_1X8,
  154. .l.width = 10,
  155. .l.bpp = 2,
  156. .s.width = 8,
  157. .s.bpp = 1,
  158. .supported = false,
  159. }, {
  160. .name = "RGB565 (LE)",
  161. .fourcc = V4L2_PIX_FMT_RGB565,
  162. .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
  163. .l.width = 10,
  164. .l.bpp = 4,
  165. .s.width = 8,
  166. .s.bpp = 2,
  167. .supported = false,
  168. }, {
  169. .name = "RGB565 (BE)",
  170. .fourcc = V4L2_PIX_FMT_RGB565X,
  171. .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
  172. .l.width = 10,
  173. .l.bpp = 4,
  174. .s.width = 8,
  175. .s.bpp = 2,
  176. .supported = false,
  177. },
  178. };
  179. static int
  180. __vpfe_get_format(struct vpfe_device *vpfe,
  181. struct v4l2_format *format, unsigned int *bpp);
  182. static struct vpfe_fmt *find_format_by_code(unsigned int code)
  183. {
  184. struct vpfe_fmt *fmt;
  185. unsigned int k;
  186. for (k = 0; k < ARRAY_SIZE(formats); k++) {
  187. fmt = &formats[k];
  188. if (fmt->code == code)
  189. return fmt;
  190. }
  191. return NULL;
  192. }
  193. static struct vpfe_fmt *find_format_by_pix(unsigned int pixelformat)
  194. {
  195. struct vpfe_fmt *fmt;
  196. unsigned int k;
  197. for (k = 0; k < ARRAY_SIZE(formats); k++) {
  198. fmt = &formats[k];
  199. if (fmt->fourcc == pixelformat)
  200. return fmt;
  201. }
  202. return NULL;
  203. }
  204. static void
  205. mbus_to_pix(struct vpfe_device *vpfe,
  206. const struct v4l2_mbus_framefmt *mbus,
  207. struct v4l2_pix_format *pix, unsigned int *bpp)
  208. {
  209. struct vpfe_subdev_info *sdinfo = vpfe->current_subdev;
  210. unsigned int bus_width = sdinfo->vpfe_param.bus_width;
  211. struct vpfe_fmt *fmt;
  212. fmt = find_format_by_code(mbus->code);
  213. if (WARN_ON(fmt == NULL)) {
  214. pr_err("Invalid mbus code set\n");
  215. *bpp = 1;
  216. return;
  217. }
  218. memset(pix, 0, sizeof(*pix));
  219. v4l2_fill_pix_format(pix, mbus);
  220. pix->pixelformat = fmt->fourcc;
  221. *bpp = (bus_width == 10) ? fmt->l.bpp : fmt->s.bpp;
  222. /* pitch should be 32 bytes aligned */
  223. pix->bytesperline = ALIGN(pix->width * *bpp, 32);
  224. pix->sizeimage = pix->bytesperline * pix->height;
  225. }
  226. static void pix_to_mbus(struct vpfe_device *vpfe,
  227. struct v4l2_pix_format *pix_fmt,
  228. struct v4l2_mbus_framefmt *mbus_fmt)
  229. {
  230. struct vpfe_fmt *fmt;
  231. fmt = find_format_by_pix(pix_fmt->pixelformat);
  232. if (!fmt) {
  233. /* default to first entry */
  234. vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
  235. pix_fmt->pixelformat);
  236. fmt = &formats[0];
  237. }
  238. memset(mbus_fmt, 0, sizeof(*mbus_fmt));
  239. v4l2_fill_mbus_format(mbus_fmt, pix_fmt, fmt->code);
  240. }
  241. /* Print Four-character-code (FOURCC) */
  242. static char *print_fourcc(u32 fmt)
  243. {
  244. static char code[5];
  245. code[0] = (unsigned char)(fmt & 0xff);
  246. code[1] = (unsigned char)((fmt >> 8) & 0xff);
  247. code[2] = (unsigned char)((fmt >> 16) & 0xff);
  248. code[3] = (unsigned char)((fmt >> 24) & 0xff);
  249. code[4] = '\0';
  250. return code;
  251. }
  252. static int
  253. cmp_v4l2_format(const struct v4l2_format *lhs, const struct v4l2_format *rhs)
  254. {
  255. return lhs->type == rhs->type &&
  256. lhs->fmt.pix.width == rhs->fmt.pix.width &&
  257. lhs->fmt.pix.height == rhs->fmt.pix.height &&
  258. lhs->fmt.pix.pixelformat == rhs->fmt.pix.pixelformat &&
  259. lhs->fmt.pix.field == rhs->fmt.pix.field &&
  260. lhs->fmt.pix.colorspace == rhs->fmt.pix.colorspace &&
  261. lhs->fmt.pix.ycbcr_enc == rhs->fmt.pix.ycbcr_enc &&
  262. lhs->fmt.pix.quantization == rhs->fmt.pix.quantization &&
  263. lhs->fmt.pix.xfer_func == rhs->fmt.pix.xfer_func;
  264. }
  265. static inline u32 vpfe_reg_read(struct vpfe_ccdc *ccdc, u32 offset)
  266. {
  267. return ioread32(ccdc->ccdc_cfg.base_addr + offset);
  268. }
  269. static inline void vpfe_reg_write(struct vpfe_ccdc *ccdc, u32 val, u32 offset)
  270. {
  271. iowrite32(val, ccdc->ccdc_cfg.base_addr + offset);
  272. }
  273. static inline struct vpfe_device *to_vpfe(struct vpfe_ccdc *ccdc)
  274. {
  275. return container_of(ccdc, struct vpfe_device, ccdc);
  276. }
  277. static inline
  278. struct vpfe_cap_buffer *to_vpfe_buffer(struct vb2_v4l2_buffer *vb)
  279. {
  280. return container_of(vb, struct vpfe_cap_buffer, vb);
  281. }
  282. static inline void vpfe_pcr_enable(struct vpfe_ccdc *ccdc, int flag)
  283. {
  284. vpfe_reg_write(ccdc, !!flag, VPFE_PCR);
  285. }
  286. static void vpfe_config_enable(struct vpfe_ccdc *ccdc, int flag)
  287. {
  288. unsigned int cfg;
  289. if (!flag) {
  290. cfg = vpfe_reg_read(ccdc, VPFE_CONFIG);
  291. cfg &= ~(VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT);
  292. } else {
  293. cfg = VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT;
  294. }
  295. vpfe_reg_write(ccdc, cfg, VPFE_CONFIG);
  296. }
  297. static void vpfe_ccdc_setwin(struct vpfe_ccdc *ccdc,
  298. struct v4l2_rect *image_win,
  299. enum ccdc_frmfmt frm_fmt,
  300. int bpp)
  301. {
  302. int horz_start, horz_nr_pixels;
  303. int vert_start, vert_nr_lines;
  304. int val, mid_img;
  305. /*
  306. * ppc - per pixel count. indicates how many pixels per cell
  307. * output to SDRAM. example, for ycbcr, it is one y and one c, so 2.
  308. * raw capture this is 1
  309. */
  310. horz_start = image_win->left * bpp;
  311. horz_nr_pixels = (image_win->width * bpp) - 1;
  312. vpfe_reg_write(ccdc, (horz_start << VPFE_HORZ_INFO_SPH_SHIFT) |
  313. horz_nr_pixels, VPFE_HORZ_INFO);
  314. vert_start = image_win->top;
  315. if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
  316. vert_nr_lines = (image_win->height >> 1) - 1;
  317. vert_start >>= 1;
  318. /* Since first line doesn't have any data */
  319. vert_start += 1;
  320. /* configure VDINT0 */
  321. val = (vert_start << VPFE_VDINT_VDINT0_SHIFT);
  322. } else {
  323. /* Since first line doesn't have any data */
  324. vert_start += 1;
  325. vert_nr_lines = image_win->height - 1;
  326. /*
  327. * configure VDINT0 and VDINT1. VDINT1 will be at half
  328. * of image height
  329. */
  330. mid_img = vert_start + (image_win->height / 2);
  331. val = (vert_start << VPFE_VDINT_VDINT0_SHIFT) |
  332. (mid_img & VPFE_VDINT_VDINT1_MASK);
  333. }
  334. vpfe_reg_write(ccdc, val, VPFE_VDINT);
  335. vpfe_reg_write(ccdc, (vert_start << VPFE_VERT_START_SLV0_SHIFT) |
  336. vert_start, VPFE_VERT_START);
  337. vpfe_reg_write(ccdc, vert_nr_lines, VPFE_VERT_LINES);
  338. }
  339. static void vpfe_reg_dump(struct vpfe_ccdc *ccdc)
  340. {
  341. struct vpfe_device *vpfe = to_vpfe(ccdc);
  342. vpfe_dbg(3, vpfe, "ALAW: 0x%x\n", vpfe_reg_read(ccdc, VPFE_ALAW));
  343. vpfe_dbg(3, vpfe, "CLAMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_CLAMP));
  344. vpfe_dbg(3, vpfe, "DCSUB: 0x%x\n", vpfe_reg_read(ccdc, VPFE_DCSUB));
  345. vpfe_dbg(3, vpfe, "BLKCMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_BLKCMP));
  346. vpfe_dbg(3, vpfe, "COLPTN: 0x%x\n", vpfe_reg_read(ccdc, VPFE_COLPTN));
  347. vpfe_dbg(3, vpfe, "SDOFST: 0x%x\n", vpfe_reg_read(ccdc, VPFE_SDOFST));
  348. vpfe_dbg(3, vpfe, "SYN_MODE: 0x%x\n",
  349. vpfe_reg_read(ccdc, VPFE_SYNMODE));
  350. vpfe_dbg(3, vpfe, "HSIZE_OFF: 0x%x\n",
  351. vpfe_reg_read(ccdc, VPFE_HSIZE_OFF));
  352. vpfe_dbg(3, vpfe, "HORZ_INFO: 0x%x\n",
  353. vpfe_reg_read(ccdc, VPFE_HORZ_INFO));
  354. vpfe_dbg(3, vpfe, "VERT_START: 0x%x\n",
  355. vpfe_reg_read(ccdc, VPFE_VERT_START));
  356. vpfe_dbg(3, vpfe, "VERT_LINES: 0x%x\n",
  357. vpfe_reg_read(ccdc, VPFE_VERT_LINES));
  358. }
  359. static int
  360. vpfe_ccdc_validate_param(struct vpfe_ccdc *ccdc,
  361. struct vpfe_ccdc_config_params_raw *ccdcparam)
  362. {
  363. struct vpfe_device *vpfe = to_vpfe(ccdc);
  364. u8 max_gamma, max_data;
  365. if (!ccdcparam->alaw.enable)
  366. return 0;
  367. max_gamma = ccdc_gamma_width_max_bit(ccdcparam->alaw.gamma_wd);
  368. max_data = ccdc_data_size_max_bit(ccdcparam->data_sz);
  369. if (ccdcparam->alaw.gamma_wd > VPFE_CCDC_GAMMA_BITS_09_0 ||
  370. ccdcparam->alaw.gamma_wd < VPFE_CCDC_GAMMA_BITS_15_6 ||
  371. max_gamma > max_data) {
  372. vpfe_dbg(1, vpfe, "Invalid data line select\n");
  373. return -EINVAL;
  374. }
  375. return 0;
  376. }
  377. static void
  378. vpfe_ccdc_update_raw_params(struct vpfe_ccdc *ccdc,
  379. struct vpfe_ccdc_config_params_raw *raw_params)
  380. {
  381. struct vpfe_ccdc_config_params_raw *config_params =
  382. &ccdc->ccdc_cfg.bayer.config_params;
  383. *config_params = *raw_params;
  384. }
  385. /*
  386. * vpfe_ccdc_restore_defaults()
  387. * This function will write defaults to all CCDC registers
  388. */
  389. static void vpfe_ccdc_restore_defaults(struct vpfe_ccdc *ccdc)
  390. {
  391. int i;
  392. /* Disable CCDC */
  393. vpfe_pcr_enable(ccdc, 0);
  394. /* set all registers to default value */
  395. for (i = 4; i <= 0x94; i += 4)
  396. vpfe_reg_write(ccdc, 0, i);
  397. vpfe_reg_write(ccdc, VPFE_NO_CULLING, VPFE_CULLING);
  398. vpfe_reg_write(ccdc, VPFE_CCDC_GAMMA_BITS_11_2, VPFE_ALAW);
  399. }
  400. static int vpfe_ccdc_close(struct vpfe_ccdc *ccdc, struct device *dev)
  401. {
  402. int dma_cntl, i, pcr;
  403. /* If the CCDC module is still busy wait for it to be done */
  404. for (i = 0; i < 10; i++) {
  405. usleep_range(5000, 6000);
  406. pcr = vpfe_reg_read(ccdc, VPFE_PCR);
  407. if (!pcr)
  408. break;
  409. /* make sure it it is disabled */
  410. vpfe_pcr_enable(ccdc, 0);
  411. }
  412. /* Disable CCDC by resetting all register to default POR values */
  413. vpfe_ccdc_restore_defaults(ccdc);
  414. /* if DMA_CNTL overflow bit is set. Clear it
  415. * It appears to take a while for this to become quiescent ~20ms
  416. */
  417. for (i = 0; i < 10; i++) {
  418. dma_cntl = vpfe_reg_read(ccdc, VPFE_DMA_CNTL);
  419. if (!(dma_cntl & VPFE_DMA_CNTL_OVERFLOW))
  420. break;
  421. /* Clear the overflow bit */
  422. vpfe_reg_write(ccdc, dma_cntl, VPFE_DMA_CNTL);
  423. usleep_range(5000, 6000);
  424. }
  425. /* Disabled the module at the CONFIG level */
  426. vpfe_config_enable(ccdc, 0);
  427. pm_runtime_put_sync(dev);
  428. return 0;
  429. }
  430. static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params)
  431. {
  432. struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
  433. struct vpfe_ccdc_config_params_raw raw_params;
  434. int x;
  435. if (ccdc->ccdc_cfg.if_type != VPFE_RAW_BAYER)
  436. return -EINVAL;
  437. x = copy_from_user(&raw_params, params, sizeof(raw_params));
  438. if (x) {
  439. vpfe_dbg(1, vpfe,
  440. "vpfe_ccdc_set_params: error in copying ccdc params, %d\n",
  441. x);
  442. return -EFAULT;
  443. }
  444. if (!vpfe_ccdc_validate_param(ccdc, &raw_params)) {
  445. vpfe_ccdc_update_raw_params(ccdc, &raw_params);
  446. return 0;
  447. }
  448. return -EINVAL;
  449. }
  450. /*
  451. * vpfe_ccdc_config_ycbcr()
  452. * This function will configure CCDC for YCbCr video capture
  453. */
  454. static void vpfe_ccdc_config_ycbcr(struct vpfe_ccdc *ccdc)
  455. {
  456. struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
  457. struct ccdc_params_ycbcr *params = &ccdc->ccdc_cfg.ycbcr;
  458. u32 syn_mode;
  459. vpfe_dbg(3, vpfe, "vpfe_ccdc_config_ycbcr:\n");
  460. /*
  461. * first restore the CCDC registers to default values
  462. * This is important since we assume default values to be set in
  463. * a lot of registers that we didn't touch
  464. */
  465. vpfe_ccdc_restore_defaults(ccdc);
  466. /*
  467. * configure pixel format, frame format, configure video frame
  468. * format, enable output to SDRAM, enable internal timing generator
  469. * and 8bit pack mode
  470. */
  471. syn_mode = (((params->pix_fmt & VPFE_SYN_MODE_INPMOD_MASK) <<
  472. VPFE_SYN_MODE_INPMOD_SHIFT) |
  473. ((params->frm_fmt & VPFE_SYN_FLDMODE_MASK) <<
  474. VPFE_SYN_FLDMODE_SHIFT) | VPFE_VDHDEN_ENABLE |
  475. VPFE_WEN_ENABLE | VPFE_DATA_PACK_ENABLE);
  476. /* setup BT.656 sync mode */
  477. if (params->bt656_enable) {
  478. vpfe_reg_write(ccdc, VPFE_REC656IF_BT656_EN, VPFE_REC656IF);
  479. /*
  480. * configure the FID, VD, HD pin polarity,
  481. * fld,hd pol positive, vd negative, 8-bit data
  482. */
  483. syn_mode |= VPFE_SYN_MODE_VD_POL_NEGATIVE;
  484. if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT)
  485. syn_mode |= VPFE_SYN_MODE_10BITS;
  486. else
  487. syn_mode |= VPFE_SYN_MODE_8BITS;
  488. } else {
  489. /* y/c external sync mode */
  490. syn_mode |= (((params->fid_pol & VPFE_FID_POL_MASK) <<
  491. VPFE_FID_POL_SHIFT) |
  492. ((params->hd_pol & VPFE_HD_POL_MASK) <<
  493. VPFE_HD_POL_SHIFT) |
  494. ((params->vd_pol & VPFE_VD_POL_MASK) <<
  495. VPFE_VD_POL_SHIFT));
  496. }
  497. vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE);
  498. /* configure video window */
  499. vpfe_ccdc_setwin(ccdc, &params->win,
  500. params->frm_fmt, params->bytesperpixel);
  501. /*
  502. * configure the order of y cb cr in SDRAM, and disable latch
  503. * internal register on vsync
  504. */
  505. if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT)
  506. vpfe_reg_write(ccdc,
  507. (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) |
  508. VPFE_LATCH_ON_VSYNC_DISABLE |
  509. VPFE_CCDCFG_BW656_10BIT, VPFE_CCDCFG);
  510. else
  511. vpfe_reg_write(ccdc,
  512. (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) |
  513. VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG);
  514. /*
  515. * configure the horizontal line offset. This should be a
  516. * on 32 byte boundary. So clear LSB 5 bits
  517. */
  518. vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF);
  519. /* configure the memory line offset */
  520. if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
  521. /* two fields are interleaved in memory */
  522. vpfe_reg_write(ccdc, VPFE_SDOFST_FIELD_INTERLEAVED,
  523. VPFE_SDOFST);
  524. }
  525. static void
  526. vpfe_ccdc_config_black_clamp(struct vpfe_ccdc *ccdc,
  527. struct vpfe_ccdc_black_clamp *bclamp)
  528. {
  529. u32 val;
  530. if (!bclamp->enable) {
  531. /* configure DCSub */
  532. val = (bclamp->dc_sub) & VPFE_BLK_DC_SUB_MASK;
  533. vpfe_reg_write(ccdc, val, VPFE_DCSUB);
  534. vpfe_reg_write(ccdc, VPFE_CLAMP_DEFAULT_VAL, VPFE_CLAMP);
  535. return;
  536. }
  537. /*
  538. * Configure gain, Start pixel, No of line to be avg,
  539. * No of pixel/line to be avg, & Enable the Black clamping
  540. */
  541. val = ((bclamp->sgain & VPFE_BLK_SGAIN_MASK) |
  542. ((bclamp->start_pixel & VPFE_BLK_ST_PXL_MASK) <<
  543. VPFE_BLK_ST_PXL_SHIFT) |
  544. ((bclamp->sample_ln & VPFE_BLK_SAMPLE_LINE_MASK) <<
  545. VPFE_BLK_SAMPLE_LINE_SHIFT) |
  546. ((bclamp->sample_pixel & VPFE_BLK_SAMPLE_LN_MASK) <<
  547. VPFE_BLK_SAMPLE_LN_SHIFT) | VPFE_BLK_CLAMP_ENABLE);
  548. vpfe_reg_write(ccdc, val, VPFE_CLAMP);
  549. /* If Black clamping is enable then make dcsub 0 */
  550. vpfe_reg_write(ccdc, VPFE_DCSUB_DEFAULT_VAL, VPFE_DCSUB);
  551. }
  552. static void
  553. vpfe_ccdc_config_black_compense(struct vpfe_ccdc *ccdc,
  554. struct vpfe_ccdc_black_compensation *bcomp)
  555. {
  556. u32 val;
  557. val = ((bcomp->b & VPFE_BLK_COMP_MASK) |
  558. ((bcomp->gb & VPFE_BLK_COMP_MASK) <<
  559. VPFE_BLK_COMP_GB_COMP_SHIFT) |
  560. ((bcomp->gr & VPFE_BLK_COMP_MASK) <<
  561. VPFE_BLK_COMP_GR_COMP_SHIFT) |
  562. ((bcomp->r & VPFE_BLK_COMP_MASK) <<
  563. VPFE_BLK_COMP_R_COMP_SHIFT));
  564. vpfe_reg_write(ccdc, val, VPFE_BLKCMP);
  565. }
  566. /*
  567. * vpfe_ccdc_config_raw()
  568. * This function will configure CCDC for Raw capture mode
  569. */
  570. static void vpfe_ccdc_config_raw(struct vpfe_ccdc *ccdc)
  571. {
  572. struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
  573. struct vpfe_ccdc_config_params_raw *config_params =
  574. &ccdc->ccdc_cfg.bayer.config_params;
  575. struct ccdc_params_raw *params = &ccdc->ccdc_cfg.bayer;
  576. unsigned int syn_mode;
  577. unsigned int val;
  578. vpfe_dbg(3, vpfe, "vpfe_ccdc_config_raw:\n");
  579. /* Reset CCDC */
  580. vpfe_ccdc_restore_defaults(ccdc);
  581. /* Disable latching function registers on VSYNC */
  582. vpfe_reg_write(ccdc, VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG);
  583. /*
  584. * Configure the vertical sync polarity(SYN_MODE.VDPOL),
  585. * horizontal sync polarity (SYN_MODE.HDPOL), frame id polarity
  586. * (SYN_MODE.FLDPOL), frame format(progressive or interlace),
  587. * data size(SYNMODE.DATSIZ), &pixel format (Input mode), output
  588. * SDRAM, enable internal timing generator
  589. */
  590. syn_mode = (((params->vd_pol & VPFE_VD_POL_MASK) << VPFE_VD_POL_SHIFT) |
  591. ((params->hd_pol & VPFE_HD_POL_MASK) << VPFE_HD_POL_SHIFT) |
  592. ((params->fid_pol & VPFE_FID_POL_MASK) <<
  593. VPFE_FID_POL_SHIFT) | ((params->frm_fmt &
  594. VPFE_FRM_FMT_MASK) << VPFE_FRM_FMT_SHIFT) |
  595. ((config_params->data_sz & VPFE_DATA_SZ_MASK) <<
  596. VPFE_DATA_SZ_SHIFT) | ((params->pix_fmt &
  597. VPFE_PIX_FMT_MASK) << VPFE_PIX_FMT_SHIFT) |
  598. VPFE_WEN_ENABLE | VPFE_VDHDEN_ENABLE);
  599. /* Enable and configure aLaw register if needed */
  600. if (config_params->alaw.enable) {
  601. val = ((config_params->alaw.gamma_wd &
  602. VPFE_ALAW_GAMMA_WD_MASK) | VPFE_ALAW_ENABLE);
  603. vpfe_reg_write(ccdc, val, VPFE_ALAW);
  604. vpfe_dbg(3, vpfe, "\nWriting 0x%x to ALAW...\n", val);
  605. }
  606. /* Configure video window */
  607. vpfe_ccdc_setwin(ccdc, &params->win, params->frm_fmt,
  608. params->bytesperpixel);
  609. /* Configure Black Clamp */
  610. vpfe_ccdc_config_black_clamp(ccdc, &config_params->blk_clamp);
  611. /* Configure Black level compensation */
  612. vpfe_ccdc_config_black_compense(ccdc, &config_params->blk_comp);
  613. /* If data size is 8 bit then pack the data */
  614. if ((config_params->data_sz == VPFE_CCDC_DATA_8BITS) ||
  615. config_params->alaw.enable)
  616. syn_mode |= VPFE_DATA_PACK_ENABLE;
  617. /*
  618. * Configure Horizontal offset register. If pack 8 is enabled then
  619. * 1 pixel will take 1 byte
  620. */
  621. vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF);
  622. vpfe_dbg(3, vpfe, "Writing %d (%x) to HSIZE_OFF\n",
  623. params->bytesperline, params->bytesperline);
  624. /* Set value for SDOFST */
  625. if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
  626. if (params->image_invert_enable) {
  627. /* For interlace inverse mode */
  628. vpfe_reg_write(ccdc, VPFE_INTERLACED_IMAGE_INVERT,
  629. VPFE_SDOFST);
  630. } else {
  631. /* For interlace non inverse mode */
  632. vpfe_reg_write(ccdc, VPFE_INTERLACED_NO_IMAGE_INVERT,
  633. VPFE_SDOFST);
  634. }
  635. } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
  636. vpfe_reg_write(ccdc, VPFE_PROGRESSIVE_NO_IMAGE_INVERT,
  637. VPFE_SDOFST);
  638. }
  639. vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE);
  640. vpfe_reg_dump(ccdc);
  641. }
  642. static inline int
  643. vpfe_ccdc_set_buftype(struct vpfe_ccdc *ccdc,
  644. enum ccdc_buftype buf_type)
  645. {
  646. if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
  647. ccdc->ccdc_cfg.bayer.buf_type = buf_type;
  648. else
  649. ccdc->ccdc_cfg.ycbcr.buf_type = buf_type;
  650. return 0;
  651. }
  652. static inline enum ccdc_buftype vpfe_ccdc_get_buftype(struct vpfe_ccdc *ccdc)
  653. {
  654. if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
  655. return ccdc->ccdc_cfg.bayer.buf_type;
  656. return ccdc->ccdc_cfg.ycbcr.buf_type;
  657. }
  658. static int vpfe_ccdc_set_pixel_format(struct vpfe_ccdc *ccdc, u32 pixfmt)
  659. {
  660. struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
  661. vpfe_dbg(1, vpfe, "vpfe_ccdc_set_pixel_format: if_type: %d, pixfmt:%s\n",
  662. ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt));
  663. if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
  664. ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
  665. /*
  666. * Need to clear it in case it was left on
  667. * after the last capture.
  668. */
  669. ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 0;
  670. switch (pixfmt) {
  671. case V4L2_PIX_FMT_SBGGR8:
  672. ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 1;
  673. break;
  674. case V4L2_PIX_FMT_YUYV:
  675. case V4L2_PIX_FMT_UYVY:
  676. case V4L2_PIX_FMT_YUV420:
  677. case V4L2_PIX_FMT_NV12:
  678. case V4L2_PIX_FMT_RGB565X:
  679. break;
  680. case V4L2_PIX_FMT_SBGGR16:
  681. default:
  682. return -EINVAL;
  683. }
  684. } else {
  685. switch (pixfmt) {
  686. case V4L2_PIX_FMT_YUYV:
  687. ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
  688. break;
  689. case V4L2_PIX_FMT_UYVY:
  690. ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
  691. break;
  692. default:
  693. return -EINVAL;
  694. }
  695. }
  696. return 0;
  697. }
  698. static u32 vpfe_ccdc_get_pixel_format(struct vpfe_ccdc *ccdc)
  699. {
  700. u32 pixfmt;
  701. if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
  702. pixfmt = V4L2_PIX_FMT_YUYV;
  703. } else {
  704. if (ccdc->ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
  705. pixfmt = V4L2_PIX_FMT_YUYV;
  706. else
  707. pixfmt = V4L2_PIX_FMT_UYVY;
  708. }
  709. return pixfmt;
  710. }
  711. static int
  712. vpfe_ccdc_set_image_window(struct vpfe_ccdc *ccdc,
  713. struct v4l2_rect *win, unsigned int bpp)
  714. {
  715. if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
  716. ccdc->ccdc_cfg.bayer.win = *win;
  717. ccdc->ccdc_cfg.bayer.bytesperpixel = bpp;
  718. ccdc->ccdc_cfg.bayer.bytesperline = ALIGN(win->width * bpp, 32);
  719. } else {
  720. ccdc->ccdc_cfg.ycbcr.win = *win;
  721. ccdc->ccdc_cfg.ycbcr.bytesperpixel = bpp;
  722. ccdc->ccdc_cfg.ycbcr.bytesperline = ALIGN(win->width * bpp, 32);
  723. }
  724. return 0;
  725. }
  726. static inline void
  727. vpfe_ccdc_get_image_window(struct vpfe_ccdc *ccdc,
  728. struct v4l2_rect *win)
  729. {
  730. if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
  731. *win = ccdc->ccdc_cfg.bayer.win;
  732. else
  733. *win = ccdc->ccdc_cfg.ycbcr.win;
  734. }
  735. static inline unsigned int vpfe_ccdc_get_line_length(struct vpfe_ccdc *ccdc)
  736. {
  737. if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
  738. return ccdc->ccdc_cfg.bayer.bytesperline;
  739. return ccdc->ccdc_cfg.ycbcr.bytesperline;
  740. }
  741. static inline int
  742. vpfe_ccdc_set_frame_format(struct vpfe_ccdc *ccdc,
  743. enum ccdc_frmfmt frm_fmt)
  744. {
  745. if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
  746. ccdc->ccdc_cfg.bayer.frm_fmt = frm_fmt;
  747. else
  748. ccdc->ccdc_cfg.ycbcr.frm_fmt = frm_fmt;
  749. return 0;
  750. }
  751. static inline enum ccdc_frmfmt
  752. vpfe_ccdc_get_frame_format(struct vpfe_ccdc *ccdc)
  753. {
  754. if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
  755. return ccdc->ccdc_cfg.bayer.frm_fmt;
  756. return ccdc->ccdc_cfg.ycbcr.frm_fmt;
  757. }
  758. static inline int vpfe_ccdc_getfid(struct vpfe_ccdc *ccdc)
  759. {
  760. return (vpfe_reg_read(ccdc, VPFE_SYNMODE) >> 15) & 1;
  761. }
  762. static inline void vpfe_set_sdr_addr(struct vpfe_ccdc *ccdc, unsigned long addr)
  763. {
  764. vpfe_reg_write(ccdc, addr & 0xffffffe0, VPFE_SDR_ADDR);
  765. }
  766. static int vpfe_ccdc_set_hw_if_params(struct vpfe_ccdc *ccdc,
  767. struct vpfe_hw_if_param *params)
  768. {
  769. struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
  770. ccdc->ccdc_cfg.if_type = params->if_type;
  771. switch (params->if_type) {
  772. case VPFE_BT656:
  773. case VPFE_YCBCR_SYNC_16:
  774. case VPFE_YCBCR_SYNC_8:
  775. case VPFE_BT656_10BIT:
  776. ccdc->ccdc_cfg.ycbcr.vd_pol = params->vdpol;
  777. ccdc->ccdc_cfg.ycbcr.hd_pol = params->hdpol;
  778. break;
  779. case VPFE_RAW_BAYER:
  780. ccdc->ccdc_cfg.bayer.vd_pol = params->vdpol;
  781. ccdc->ccdc_cfg.bayer.hd_pol = params->hdpol;
  782. if (params->bus_width == 10)
  783. ccdc->ccdc_cfg.bayer.config_params.data_sz =
  784. VPFE_CCDC_DATA_10BITS;
  785. else
  786. ccdc->ccdc_cfg.bayer.config_params.data_sz =
  787. VPFE_CCDC_DATA_8BITS;
  788. vpfe_dbg(1, vpfe, "params.bus_width: %d\n",
  789. params->bus_width);
  790. vpfe_dbg(1, vpfe, "config_params.data_sz: %d\n",
  791. ccdc->ccdc_cfg.bayer.config_params.data_sz);
  792. break;
  793. default:
  794. return -EINVAL;
  795. }
  796. return 0;
  797. }
  798. static void vpfe_clear_intr(struct vpfe_ccdc *ccdc, int vdint)
  799. {
  800. unsigned int vpfe_int_status;
  801. vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS);
  802. switch (vdint) {
  803. /* VD0 interrupt */
  804. case VPFE_VDINT0:
  805. vpfe_int_status &= ~VPFE_VDINT0;
  806. vpfe_int_status |= VPFE_VDINT0;
  807. break;
  808. /* VD1 interrupt */
  809. case VPFE_VDINT1:
  810. vpfe_int_status &= ~VPFE_VDINT1;
  811. vpfe_int_status |= VPFE_VDINT1;
  812. break;
  813. /* VD2 interrupt */
  814. case VPFE_VDINT2:
  815. vpfe_int_status &= ~VPFE_VDINT2;
  816. vpfe_int_status |= VPFE_VDINT2;
  817. break;
  818. /* Clear all interrupts */
  819. default:
  820. vpfe_int_status &= ~(VPFE_VDINT0 |
  821. VPFE_VDINT1 |
  822. VPFE_VDINT2);
  823. vpfe_int_status |= (VPFE_VDINT0 |
  824. VPFE_VDINT1 |
  825. VPFE_VDINT2);
  826. break;
  827. }
  828. /* Clear specific VDINT from the status register */
  829. vpfe_reg_write(ccdc, vpfe_int_status, VPFE_IRQ_STS);
  830. vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS);
  831. /* Acknowledge that we are done with all interrupts */
  832. vpfe_reg_write(ccdc, 1, VPFE_IRQ_EOI);
  833. }
  834. static void vpfe_ccdc_config_defaults(struct vpfe_ccdc *ccdc)
  835. {
  836. ccdc->ccdc_cfg.if_type = VPFE_RAW_BAYER;
  837. ccdc->ccdc_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT;
  838. ccdc->ccdc_cfg.ycbcr.frm_fmt = CCDC_FRMFMT_INTERLACED;
  839. ccdc->ccdc_cfg.ycbcr.fid_pol = VPFE_PINPOL_POSITIVE;
  840. ccdc->ccdc_cfg.ycbcr.vd_pol = VPFE_PINPOL_POSITIVE;
  841. ccdc->ccdc_cfg.ycbcr.hd_pol = VPFE_PINPOL_POSITIVE;
  842. ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
  843. ccdc->ccdc_cfg.ycbcr.buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED;
  844. ccdc->ccdc_cfg.ycbcr.win.left = 0;
  845. ccdc->ccdc_cfg.ycbcr.win.top = 0;
  846. ccdc->ccdc_cfg.ycbcr.win.width = 720;
  847. ccdc->ccdc_cfg.ycbcr.win.height = 576;
  848. ccdc->ccdc_cfg.ycbcr.bt656_enable = 1;
  849. ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
  850. ccdc->ccdc_cfg.bayer.frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
  851. ccdc->ccdc_cfg.bayer.fid_pol = VPFE_PINPOL_POSITIVE;
  852. ccdc->ccdc_cfg.bayer.vd_pol = VPFE_PINPOL_POSITIVE;
  853. ccdc->ccdc_cfg.bayer.hd_pol = VPFE_PINPOL_POSITIVE;
  854. ccdc->ccdc_cfg.bayer.win.left = 0;
  855. ccdc->ccdc_cfg.bayer.win.top = 0;
  856. ccdc->ccdc_cfg.bayer.win.width = 800;
  857. ccdc->ccdc_cfg.bayer.win.height = 600;
  858. ccdc->ccdc_cfg.bayer.config_params.data_sz = VPFE_CCDC_DATA_8BITS;
  859. ccdc->ccdc_cfg.bayer.config_params.alaw.gamma_wd =
  860. VPFE_CCDC_GAMMA_BITS_09_0;
  861. }
  862. /*
  863. * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings
  864. */
  865. static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe,
  866. struct v4l2_format *f)
  867. {
  868. struct v4l2_rect image_win;
  869. enum ccdc_buftype buf_type;
  870. enum ccdc_frmfmt frm_fmt;
  871. memset(f, 0, sizeof(*f));
  872. f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  873. vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
  874. f->fmt.pix.width = image_win.width;
  875. f->fmt.pix.height = image_win.height;
  876. f->fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
  877. f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
  878. f->fmt.pix.height;
  879. buf_type = vpfe_ccdc_get_buftype(&vpfe->ccdc);
  880. f->fmt.pix.pixelformat = vpfe_ccdc_get_pixel_format(&vpfe->ccdc);
  881. frm_fmt = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
  882. if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
  883. f->fmt.pix.field = V4L2_FIELD_NONE;
  884. } else if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
  885. if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) {
  886. f->fmt.pix.field = V4L2_FIELD_INTERLACED;
  887. } else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED) {
  888. f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
  889. } else {
  890. vpfe_err(vpfe, "Invalid buf_type\n");
  891. return -EINVAL;
  892. }
  893. } else {
  894. vpfe_err(vpfe, "Invalid frm_fmt\n");
  895. return -EINVAL;
  896. }
  897. return 0;
  898. }
  899. static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
  900. {
  901. enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
  902. int ret = 0;
  903. vpfe_dbg(2, vpfe, "vpfe_config_ccdc_image_format\n");
  904. vpfe_dbg(1, vpfe, "pixelformat: %s\n",
  905. print_fourcc(vpfe->fmt.fmt.pix.pixelformat));
  906. if (vpfe_ccdc_set_pixel_format(&vpfe->ccdc,
  907. vpfe->fmt.fmt.pix.pixelformat) < 0) {
  908. vpfe_err(vpfe, "couldn't set pix format in ccdc\n");
  909. return -EINVAL;
  910. }
  911. /* configure the image window */
  912. vpfe_ccdc_set_image_window(&vpfe->ccdc, &vpfe->crop, vpfe->bpp);
  913. switch (vpfe->fmt.fmt.pix.field) {
  914. case V4L2_FIELD_INTERLACED:
  915. /* do nothing, since it is default */
  916. ret = vpfe_ccdc_set_buftype(
  917. &vpfe->ccdc,
  918. CCDC_BUFTYPE_FLD_INTERLEAVED);
  919. break;
  920. case V4L2_FIELD_NONE:
  921. frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
  922. /* buffer type only applicable for interlaced scan */
  923. break;
  924. case V4L2_FIELD_SEQ_TB:
  925. ret = vpfe_ccdc_set_buftype(
  926. &vpfe->ccdc,
  927. CCDC_BUFTYPE_FLD_SEPARATED);
  928. break;
  929. default:
  930. return -EINVAL;
  931. }
  932. if (ret)
  933. return ret;
  934. return vpfe_ccdc_set_frame_format(&vpfe->ccdc, frm_fmt);
  935. }
  936. /*
  937. * vpfe_config_image_format()
  938. * For a given standard, this functions sets up the default
  939. * pix format & crop values in the vpfe device and ccdc. It first
  940. * starts with defaults based values from the standard table.
  941. * It then checks if sub device supports get_fmt and then override the
  942. * values based on that.Sets crop values to match with scan resolution
  943. * starting at 0,0. It calls vpfe_config_ccdc_image_format() set the
  944. * values in ccdc
  945. */
  946. static int vpfe_config_image_format(struct vpfe_device *vpfe,
  947. v4l2_std_id std_id)
  948. {
  949. struct v4l2_pix_format *pix = &vpfe->fmt.fmt.pix;
  950. int i, ret;
  951. for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
  952. if (vpfe_standards[i].std_id & std_id) {
  953. vpfe->std_info.active_pixels =
  954. vpfe_standards[i].width;
  955. vpfe->std_info.active_lines =
  956. vpfe_standards[i].height;
  957. vpfe->std_info.frame_format =
  958. vpfe_standards[i].frame_format;
  959. vpfe->std_index = i;
  960. break;
  961. }
  962. }
  963. if (i == ARRAY_SIZE(vpfe_standards)) {
  964. vpfe_err(vpfe, "standard not supported\n");
  965. return -EINVAL;
  966. }
  967. vpfe->crop.top = vpfe->crop.left = 0;
  968. vpfe->crop.width = vpfe->std_info.active_pixels;
  969. vpfe->crop.height = vpfe->std_info.active_lines;
  970. pix->width = vpfe->crop.width;
  971. pix->height = vpfe->crop.height;
  972. pix->pixelformat = V4L2_PIX_FMT_YUYV;
  973. /* first field and frame format based on standard frame format */
  974. if (vpfe->std_info.frame_format)
  975. pix->field = V4L2_FIELD_INTERLACED;
  976. else
  977. pix->field = V4L2_FIELD_NONE;
  978. ret = __vpfe_get_format(vpfe, &vpfe->fmt, &vpfe->bpp);
  979. if (ret)
  980. return ret;
  981. /* Update the crop window based on found values */
  982. vpfe->crop.width = pix->width;
  983. vpfe->crop.height = pix->height;
  984. return vpfe_config_ccdc_image_format(vpfe);
  985. }
  986. static int vpfe_initialize_device(struct vpfe_device *vpfe)
  987. {
  988. struct vpfe_subdev_info *sdinfo;
  989. int ret;
  990. sdinfo = &vpfe->cfg->sub_devs[0];
  991. sdinfo->sd = vpfe->sd[0];
  992. vpfe->current_input = 0;
  993. vpfe->std_index = 0;
  994. /* Configure the default format information */
  995. ret = vpfe_config_image_format(vpfe,
  996. vpfe_standards[vpfe->std_index].std_id);
  997. if (ret)
  998. return ret;
  999. pm_runtime_get_sync(vpfe->pdev);
  1000. vpfe_config_enable(&vpfe->ccdc, 1);
  1001. vpfe_ccdc_restore_defaults(&vpfe->ccdc);
  1002. /* Clear all VPFE interrupts */
  1003. vpfe_clear_intr(&vpfe->ccdc, -1);
  1004. return ret;
  1005. }
  1006. /*
  1007. * vpfe_release : This function is based on the vb2_fop_release
  1008. * helper function.
  1009. * It has been augmented to handle module power management,
  1010. * by disabling/enabling h/w module fcntl clock when necessary.
  1011. */
  1012. static int vpfe_release(struct file *file)
  1013. {
  1014. struct vpfe_device *vpfe = video_drvdata(file);
  1015. bool fh_singular;
  1016. int ret;
  1017. mutex_lock(&vpfe->lock);
  1018. /* Save the singular status before we call the clean-up helper */
  1019. fh_singular = v4l2_fh_is_singular_file(file);
  1020. /* the release helper will cleanup any on-going streaming */
  1021. ret = _vb2_fop_release(file, NULL);
  1022. /*
  1023. * If this was the last open file.
  1024. * Then de-initialize hw module.
  1025. */
  1026. if (fh_singular)
  1027. vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
  1028. mutex_unlock(&vpfe->lock);
  1029. return ret;
  1030. }
  1031. /*
  1032. * vpfe_open : This function is based on the v4l2_fh_open helper function.
  1033. * It has been augmented to handle module power management,
  1034. * by disabling/enabling h/w module fcntl clock when necessary.
  1035. */
  1036. static int vpfe_open(struct file *file)
  1037. {
  1038. struct vpfe_device *vpfe = video_drvdata(file);
  1039. int ret;
  1040. mutex_lock(&vpfe->lock);
  1041. ret = v4l2_fh_open(file);
  1042. if (ret) {
  1043. vpfe_err(vpfe, "v4l2_fh_open failed\n");
  1044. goto unlock;
  1045. }
  1046. if (!v4l2_fh_is_singular_file(file))
  1047. goto unlock;
  1048. if (vpfe_initialize_device(vpfe)) {
  1049. v4l2_fh_release(file);
  1050. ret = -ENODEV;
  1051. }
  1052. unlock:
  1053. mutex_unlock(&vpfe->lock);
  1054. return ret;
  1055. }
  1056. /**
  1057. * vpfe_schedule_next_buffer: set next buffer address for capture
  1058. * @vpfe : ptr to vpfe device
  1059. *
  1060. * This function will get next buffer from the dma queue and
  1061. * set the buffer address in the vpfe register for capture.
  1062. * the buffer is marked active
  1063. *
  1064. * Assumes caller is holding vpfe->dma_queue_lock already
  1065. */
  1066. static inline void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
  1067. {
  1068. vpfe->next_frm = list_entry(vpfe->dma_queue.next,
  1069. struct vpfe_cap_buffer, list);
  1070. list_del(&vpfe->next_frm->list);
  1071. vpfe_set_sdr_addr(&vpfe->ccdc,
  1072. vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0));
  1073. }
  1074. static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
  1075. {
  1076. unsigned long addr;
  1077. addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0) +
  1078. vpfe->field_off;
  1079. vpfe_set_sdr_addr(&vpfe->ccdc, addr);
  1080. }
  1081. /*
  1082. * vpfe_process_buffer_complete: process a completed buffer
  1083. * @vpfe : ptr to vpfe device
  1084. *
  1085. * This function time stamp the buffer and mark it as DONE. It also
  1086. * wake up any process waiting on the QUEUE and set the next buffer
  1087. * as current
  1088. */
  1089. static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
  1090. {
  1091. vpfe->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
  1092. vpfe->cur_frm->vb.field = vpfe->fmt.fmt.pix.field;
  1093. vpfe->cur_frm->vb.sequence = vpfe->sequence++;
  1094. vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
  1095. vpfe->cur_frm = vpfe->next_frm;
  1096. }
  1097. /*
  1098. * vpfe_isr : ISR handler for vpfe capture (VINT0)
  1099. * @irq: irq number
  1100. * @dev_id: dev_id ptr
  1101. *
  1102. * It changes status of the captured buffer, takes next buffer from the queue
  1103. * and sets its address in VPFE registers
  1104. */
  1105. static irqreturn_t vpfe_isr(int irq, void *dev)
  1106. {
  1107. struct vpfe_device *vpfe = (struct vpfe_device *)dev;
  1108. enum v4l2_field field;
  1109. int intr_status;
  1110. int fid;
  1111. intr_status = vpfe_reg_read(&vpfe->ccdc, VPFE_IRQ_STS);
  1112. if (intr_status & VPFE_VDINT0) {
  1113. field = vpfe->fmt.fmt.pix.field;
  1114. if (field == V4L2_FIELD_NONE) {
  1115. /* handle progressive frame capture */
  1116. if (vpfe->cur_frm != vpfe->next_frm)
  1117. vpfe_process_buffer_complete(vpfe);
  1118. goto next_intr;
  1119. }
  1120. /* interlaced or TB capture check which field
  1121. we are in hardware */
  1122. fid = vpfe_ccdc_getfid(&vpfe->ccdc);
  1123. /* switch the software maintained field id */
  1124. vpfe->field ^= 1;
  1125. if (fid == vpfe->field) {
  1126. /* we are in-sync here,continue */
  1127. if (fid == 0) {
  1128. /*
  1129. * One frame is just being captured. If the
  1130. * next frame is available, release the
  1131. * current frame and move on
  1132. */
  1133. if (vpfe->cur_frm != vpfe->next_frm)
  1134. vpfe_process_buffer_complete(vpfe);
  1135. /*
  1136. * based on whether the two fields are stored
  1137. * interleave or separately in memory,
  1138. * reconfigure the CCDC memory address
  1139. */
  1140. if (field == V4L2_FIELD_SEQ_TB)
  1141. vpfe_schedule_bottom_field(vpfe);
  1142. goto next_intr;
  1143. }
  1144. /*
  1145. * if one field is just being captured configure
  1146. * the next frame get the next frame from the empty
  1147. * queue if no frame is available hold on to the
  1148. * current buffer
  1149. */
  1150. spin_lock(&vpfe->dma_queue_lock);
  1151. if (!list_empty(&vpfe->dma_queue) &&
  1152. vpfe->cur_frm == vpfe->next_frm)
  1153. vpfe_schedule_next_buffer(vpfe);
  1154. spin_unlock(&vpfe->dma_queue_lock);
  1155. } else if (fid == 0) {
  1156. /*
  1157. * out of sync. Recover from any hardware out-of-sync.
  1158. * May loose one frame
  1159. */
  1160. vpfe->field = fid;
  1161. }
  1162. }
  1163. next_intr:
  1164. if (intr_status & VPFE_VDINT1) {
  1165. spin_lock(&vpfe->dma_queue_lock);
  1166. if (vpfe->fmt.fmt.pix.field == V4L2_FIELD_NONE &&
  1167. !list_empty(&vpfe->dma_queue) &&
  1168. vpfe->cur_frm == vpfe->next_frm)
  1169. vpfe_schedule_next_buffer(vpfe);
  1170. spin_unlock(&vpfe->dma_queue_lock);
  1171. }
  1172. vpfe_clear_intr(&vpfe->ccdc, intr_status);
  1173. return IRQ_HANDLED;
  1174. }
  1175. static inline void vpfe_detach_irq(struct vpfe_device *vpfe)
  1176. {
  1177. unsigned int intr = VPFE_VDINT0;
  1178. enum ccdc_frmfmt frame_format;
  1179. frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
  1180. if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
  1181. intr |= VPFE_VDINT1;
  1182. vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_CLR);
  1183. }
  1184. static inline void vpfe_attach_irq(struct vpfe_device *vpfe)
  1185. {
  1186. unsigned int intr = VPFE_VDINT0;
  1187. enum ccdc_frmfmt frame_format;
  1188. frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
  1189. if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
  1190. intr |= VPFE_VDINT1;
  1191. vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_SET);
  1192. }
  1193. static int vpfe_querycap(struct file *file, void *priv,
  1194. struct v4l2_capability *cap)
  1195. {
  1196. struct vpfe_device *vpfe = video_drvdata(file);
  1197. vpfe_dbg(2, vpfe, "vpfe_querycap\n");
  1198. strlcpy(cap->driver, VPFE_MODULE_NAME, sizeof(cap->driver));
  1199. strlcpy(cap->card, "TI AM437x VPFE", sizeof(cap->card));
  1200. snprintf(cap->bus_info, sizeof(cap->bus_info),
  1201. "platform:%s", vpfe->v4l2_dev.name);
  1202. cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
  1203. V4L2_CAP_READWRITE;
  1204. cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
  1205. return 0;
  1206. }
  1207. /* get the format set at output pad of the adjacent subdev */
  1208. static int __vpfe_get_format(struct vpfe_device *vpfe,
  1209. struct v4l2_format *format, unsigned int *bpp)
  1210. {
  1211. struct v4l2_mbus_framefmt mbus_fmt;
  1212. struct vpfe_subdev_info *sdinfo;
  1213. struct v4l2_subdev_format fmt;
  1214. int ret;
  1215. sdinfo = vpfe->current_subdev;
  1216. if (!sdinfo->sd)
  1217. return -EINVAL;
  1218. fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
  1219. fmt.pad = 0;
  1220. ret = v4l2_subdev_call(sdinfo->sd, pad, get_fmt, NULL, &fmt);
  1221. if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
  1222. return ret;
  1223. if (!ret) {
  1224. v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
  1225. mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
  1226. } else {
  1227. ret = v4l2_device_call_until_err(&vpfe->v4l2_dev,
  1228. sdinfo->grp_id,
  1229. pad, get_fmt,
  1230. NULL, &fmt);
  1231. if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
  1232. return ret;
  1233. v4l2_fill_pix_format(&format->fmt.pix, &mbus_fmt);
  1234. mbus_to_pix(vpfe, &mbus_fmt, &format->fmt.pix, bpp);
  1235. }
  1236. format->type = vpfe->fmt.type;
  1237. vpfe_dbg(1, vpfe,
  1238. "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
  1239. __func__, format->fmt.pix.width, format->fmt.pix.height,
  1240. print_fourcc(format->fmt.pix.pixelformat),
  1241. format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);
  1242. return 0;
  1243. }
  1244. /* set the format at output pad of the adjacent subdev */
  1245. static int __vpfe_set_format(struct vpfe_device *vpfe,
  1246. struct v4l2_format *format, unsigned int *bpp)
  1247. {
  1248. struct vpfe_subdev_info *sdinfo;
  1249. struct v4l2_subdev_format fmt;
  1250. int ret;
  1251. vpfe_dbg(2, vpfe, "__vpfe_set_format\n");
  1252. sdinfo = vpfe->current_subdev;
  1253. if (!sdinfo->sd)
  1254. return -EINVAL;
  1255. fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
  1256. fmt.pad = 0;
  1257. pix_to_mbus(vpfe, &format->fmt.pix, &fmt.format);
  1258. ret = v4l2_subdev_call(sdinfo->sd, pad, set_fmt, NULL, &fmt);
  1259. if (ret)
  1260. return ret;
  1261. v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
  1262. mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
  1263. format->type = vpfe->fmt.type;
  1264. vpfe_dbg(1, vpfe,
  1265. "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
  1266. __func__, format->fmt.pix.width, format->fmt.pix.height,
  1267. print_fourcc(format->fmt.pix.pixelformat),
  1268. format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);
  1269. return 0;
  1270. }
  1271. static int vpfe_g_fmt(struct file *file, void *priv,
  1272. struct v4l2_format *fmt)
  1273. {
  1274. struct vpfe_device *vpfe = video_drvdata(file);
  1275. vpfe_dbg(2, vpfe, "vpfe_g_fmt\n");
  1276. *fmt = vpfe->fmt;
  1277. return 0;
  1278. }
  1279. static int vpfe_enum_fmt(struct file *file, void *priv,
  1280. struct v4l2_fmtdesc *f)
  1281. {
  1282. struct vpfe_device *vpfe = video_drvdata(file);
  1283. struct vpfe_subdev_info *sdinfo;
  1284. struct vpfe_fmt *fmt = NULL;
  1285. unsigned int k;
  1286. vpfe_dbg(2, vpfe, "vpfe_enum_format index:%d\n",
  1287. f->index);
  1288. sdinfo = vpfe->current_subdev;
  1289. if (!sdinfo->sd)
  1290. return -EINVAL;
  1291. if (f->index > ARRAY_SIZE(formats))
  1292. return -EINVAL;
  1293. for (k = 0; k < ARRAY_SIZE(formats); k++) {
  1294. if (formats[k].index == f->index) {
  1295. fmt = &formats[k];
  1296. break;
  1297. }
  1298. }
  1299. if (!fmt)
  1300. return -EINVAL;
  1301. strncpy(f->description, fmt->name, sizeof(f->description) - 1);
  1302. f->pixelformat = fmt->fourcc;
  1303. f->type = vpfe->fmt.type;
  1304. vpfe_dbg(1, vpfe, "vpfe_enum_format: mbus index: %d code: %x pixelformat: %s [%s]\n",
  1305. f->index, fmt->code, print_fourcc(fmt->fourcc), fmt->name);
  1306. return 0;
  1307. }
  1308. static int vpfe_try_fmt(struct file *file, void *priv,
  1309. struct v4l2_format *fmt)
  1310. {
  1311. struct vpfe_device *vpfe = video_drvdata(file);
  1312. unsigned int bpp;
  1313. vpfe_dbg(2, vpfe, "vpfe_try_fmt\n");
  1314. return __vpfe_get_format(vpfe, fmt, &bpp);
  1315. }
  1316. static int vpfe_s_fmt(struct file *file, void *priv,
  1317. struct v4l2_format *fmt)
  1318. {
  1319. struct vpfe_device *vpfe = video_drvdata(file);
  1320. struct v4l2_format format;
  1321. unsigned int bpp;
  1322. int ret;
  1323. vpfe_dbg(2, vpfe, "vpfe_s_fmt\n");
  1324. /* If streaming is started, return error */
  1325. if (vb2_is_busy(&vpfe->buffer_queue)) {
  1326. vpfe_err(vpfe, "%s device busy\n", __func__);
  1327. return -EBUSY;
  1328. }
  1329. ret = __vpfe_get_format(vpfe, &format, &bpp);
  1330. if (ret)
  1331. return ret;
  1332. if (!cmp_v4l2_format(fmt, &format)) {
  1333. /* Sensor format is different from the requested format
  1334. * so we need to change it
  1335. */
  1336. ret = __vpfe_set_format(vpfe, fmt, &bpp);
  1337. if (ret)
  1338. return ret;
  1339. } else /* Just make sure all of the fields are consistent */
  1340. *fmt = format;
  1341. /* First detach any IRQ if currently attached */
  1342. vpfe_detach_irq(vpfe);
  1343. vpfe->fmt = *fmt;
  1344. vpfe->bpp = bpp;
  1345. /* Update the crop window based on found values */
  1346. vpfe->crop.width = fmt->fmt.pix.width;
  1347. vpfe->crop.height = fmt->fmt.pix.height;
  1348. /* set image capture parameters in the ccdc */
  1349. return vpfe_config_ccdc_image_format(vpfe);
  1350. }
  1351. static int vpfe_enum_size(struct file *file, void *priv,
  1352. struct v4l2_frmsizeenum *fsize)
  1353. {
  1354. struct vpfe_device *vpfe = video_drvdata(file);
  1355. struct v4l2_subdev_frame_size_enum fse;
  1356. struct vpfe_subdev_info *sdinfo;
  1357. struct v4l2_mbus_framefmt mbus;
  1358. struct v4l2_pix_format pix;
  1359. struct vpfe_fmt *fmt;
  1360. int ret;
  1361. vpfe_dbg(2, vpfe, "vpfe_enum_size\n");
  1362. /* check for valid format */
  1363. fmt = find_format_by_pix(fsize->pixel_format);
  1364. if (!fmt) {
  1365. vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
  1366. fsize->pixel_format);
  1367. return -EINVAL;
  1368. }
  1369. memset(fsize->reserved, 0x0, sizeof(fsize->reserved));
  1370. sdinfo = vpfe->current_subdev;
  1371. if (!sdinfo->sd)
  1372. return -EINVAL;
  1373. memset(&pix, 0x0, sizeof(pix));
  1374. /* Construct pix from parameter and use default for the rest */
  1375. pix.pixelformat = fsize->pixel_format;
  1376. pix.width = 640;
  1377. pix.height = 480;
  1378. pix.colorspace = V4L2_COLORSPACE_SRGB;
  1379. pix.field = V4L2_FIELD_NONE;
  1380. pix_to_mbus(vpfe, &pix, &mbus);
  1381. memset(&fse, 0x0, sizeof(fse));
  1382. fse.index = fsize->index;
  1383. fse.pad = 0;
  1384. fse.code = mbus.code;
  1385. fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
  1386. ret = v4l2_subdev_call(sdinfo->sd, pad, enum_frame_size, NULL, &fse);
  1387. if (ret)
  1388. return -EINVAL;
  1389. vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
  1390. fse.index, fse.code, fse.min_width, fse.max_width,
  1391. fse.min_height, fse.max_height);
  1392. fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
  1393. fsize->discrete.width = fse.max_width;
  1394. fsize->discrete.height = fse.max_height;
  1395. vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d pixformat: %s size: %dx%d\n",
  1396. fsize->index, print_fourcc(fsize->pixel_format),
  1397. fsize->discrete.width, fsize->discrete.height);
  1398. return 0;
  1399. }
  1400. /*
  1401. * vpfe_get_subdev_input_index - Get subdev index and subdev input index for a
  1402. * given app input index
  1403. */
  1404. static int
  1405. vpfe_get_subdev_input_index(struct vpfe_device *vpfe,
  1406. int *subdev_index,
  1407. int *subdev_input_index,
  1408. int app_input_index)
  1409. {
  1410. int i, j = 0;
  1411. for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
  1412. if (app_input_index < (j + 1)) {
  1413. *subdev_index = i;
  1414. *subdev_input_index = app_input_index - j;
  1415. return 0;
  1416. }
  1417. j++;
  1418. }
  1419. return -EINVAL;
  1420. }
  1421. /*
  1422. * vpfe_get_app_input - Get app input index for a given subdev input index
  1423. * driver stores the input index of the current sub device and translate it
  1424. * when application request the current input
  1425. */
  1426. static int vpfe_get_app_input_index(struct vpfe_device *vpfe,
  1427. int *app_input_index)
  1428. {
  1429. struct vpfe_config *cfg = vpfe->cfg;
  1430. struct vpfe_subdev_info *sdinfo;
  1431. struct i2c_client *client;
  1432. struct i2c_client *curr_client;
  1433. int i, j = 0;
  1434. curr_client = v4l2_get_subdevdata(vpfe->current_subdev->sd);
  1435. for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
  1436. sdinfo = &cfg->sub_devs[i];
  1437. client = v4l2_get_subdevdata(sdinfo->sd);
  1438. if (client->addr == curr_client->addr &&
  1439. client->adapter->nr == curr_client->adapter->nr) {
  1440. if (vpfe->current_input >= 1)
  1441. return -1;
  1442. *app_input_index = j + vpfe->current_input;
  1443. return 0;
  1444. }
  1445. j++;
  1446. }
  1447. return -EINVAL;
  1448. }
  1449. static int vpfe_enum_input(struct file *file, void *priv,
  1450. struct v4l2_input *inp)
  1451. {
  1452. struct vpfe_device *vpfe = video_drvdata(file);
  1453. struct vpfe_subdev_info *sdinfo;
  1454. int subdev, index;
  1455. vpfe_dbg(2, vpfe, "vpfe_enum_input\n");
  1456. if (vpfe_get_subdev_input_index(vpfe, &subdev, &index,
  1457. inp->index) < 0) {
  1458. vpfe_dbg(1, vpfe,
  1459. "input information not found for the subdev\n");
  1460. return -EINVAL;
  1461. }
  1462. sdinfo = &vpfe->cfg->sub_devs[subdev];
  1463. *inp = sdinfo->inputs[index];
  1464. return 0;
  1465. }
  1466. static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
  1467. {
  1468. struct vpfe_device *vpfe = video_drvdata(file);
  1469. vpfe_dbg(2, vpfe, "vpfe_g_input\n");
  1470. return vpfe_get_app_input_index(vpfe, index);
  1471. }
  1472. /* Assumes caller is holding vpfe_dev->lock */
  1473. static int vpfe_set_input(struct vpfe_device *vpfe, unsigned int index)
  1474. {
  1475. int subdev_index = 0, inp_index = 0;
  1476. struct vpfe_subdev_info *sdinfo;
  1477. struct vpfe_route *route;
  1478. u32 input, output;
  1479. int ret;
  1480. vpfe_dbg(2, vpfe, "vpfe_set_input: index: %d\n", index);
  1481. /* If streaming is started, return error */
  1482. if (vb2_is_busy(&vpfe->buffer_queue)) {
  1483. vpfe_err(vpfe, "%s device busy\n", __func__);
  1484. return -EBUSY;
  1485. }
  1486. ret = vpfe_get_subdev_input_index(vpfe,
  1487. &subdev_index,
  1488. &inp_index,
  1489. index);
  1490. if (ret < 0) {
  1491. vpfe_err(vpfe, "invalid input index: %d\n", index);
  1492. goto get_out;
  1493. }
  1494. sdinfo = &vpfe->cfg->sub_devs[subdev_index];
  1495. sdinfo->sd = vpfe->sd[subdev_index];
  1496. route = &sdinfo->routes[inp_index];
  1497. if (route && sdinfo->can_route) {
  1498. input = route->input;
  1499. output = route->output;
  1500. if (sdinfo->sd) {
  1501. ret = v4l2_subdev_call(sdinfo->sd, video,
  1502. s_routing, input, output, 0);
  1503. if (ret) {
  1504. vpfe_err(vpfe, "s_routing failed\n");
  1505. ret = -EINVAL;
  1506. goto get_out;
  1507. }
  1508. }
  1509. }
  1510. vpfe->current_subdev = sdinfo;
  1511. if (sdinfo->sd)
  1512. vpfe->v4l2_dev.ctrl_handler = sdinfo->sd->ctrl_handler;
  1513. vpfe->current_input = index;
  1514. vpfe->std_index = 0;
  1515. /* set the bus/interface parameter for the sub device in ccdc */
  1516. ret = vpfe_ccdc_set_hw_if_params(&vpfe->ccdc, &sdinfo->vpfe_param);
  1517. if (ret)
  1518. return ret;
  1519. /* set the default image parameters in the device */
  1520. return vpfe_config_image_format(vpfe,
  1521. vpfe_standards[vpfe->std_index].std_id);
  1522. get_out:
  1523. return ret;
  1524. }
  1525. static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
  1526. {
  1527. struct vpfe_device *vpfe = video_drvdata(file);
  1528. vpfe_dbg(2, vpfe,
  1529. "vpfe_s_input: index: %d\n", index);
  1530. return vpfe_set_input(vpfe, index);
  1531. }
  1532. static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
  1533. {
  1534. struct vpfe_device *vpfe = video_drvdata(file);
  1535. struct vpfe_subdev_info *sdinfo;
  1536. vpfe_dbg(2, vpfe, "vpfe_querystd\n");
  1537. sdinfo = vpfe->current_subdev;
  1538. if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
  1539. return -ENODATA;
  1540. /* Call querystd function of decoder device */
  1541. return v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id,
  1542. video, querystd, std_id);
  1543. }
  1544. static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
  1545. {
  1546. struct vpfe_device *vpfe = video_drvdata(file);
  1547. struct vpfe_subdev_info *sdinfo;
  1548. int ret;
  1549. vpfe_dbg(2, vpfe, "vpfe_s_std\n");
  1550. sdinfo = vpfe->current_subdev;
  1551. if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
  1552. return -ENODATA;
  1553. /* If streaming is started, return error */
  1554. if (vb2_is_busy(&vpfe->buffer_queue)) {
  1555. vpfe_err(vpfe, "%s device busy\n", __func__);
  1556. ret = -EBUSY;
  1557. return ret;
  1558. }
  1559. ret = v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id,
  1560. video, s_std, std_id);
  1561. if (ret < 0) {
  1562. vpfe_err(vpfe, "Failed to set standard\n");
  1563. return ret;
  1564. }
  1565. ret = vpfe_config_image_format(vpfe, std_id);
  1566. return ret;
  1567. }
  1568. static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
  1569. {
  1570. struct vpfe_device *vpfe = video_drvdata(file);
  1571. struct vpfe_subdev_info *sdinfo;
  1572. vpfe_dbg(2, vpfe, "vpfe_g_std\n");
  1573. sdinfo = vpfe->current_subdev;
  1574. if (sdinfo->inputs[0].capabilities != V4L2_IN_CAP_STD)
  1575. return -ENODATA;
  1576. *std_id = vpfe_standards[vpfe->std_index].std_id;
  1577. return 0;
  1578. }
  1579. /*
  1580. * vpfe_calculate_offsets : This function calculates buffers offset
  1581. * for top and bottom field
  1582. */
  1583. static void vpfe_calculate_offsets(struct vpfe_device *vpfe)
  1584. {
  1585. struct v4l2_rect image_win;
  1586. vpfe_dbg(2, vpfe, "vpfe_calculate_offsets\n");
  1587. vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
  1588. vpfe->field_off = image_win.height * image_win.width;
  1589. }
  1590. /*
  1591. * vpfe_queue_setup - Callback function for buffer setup.
  1592. * @vq: vb2_queue ptr
  1593. * @nbuffers: ptr to number of buffers requested by application
  1594. * @nplanes:: contains number of distinct video planes needed to hold a frame
  1595. * @sizes[]: contains the size (in bytes) of each plane.
  1596. * @alloc_devs: ptr to allocation context
  1597. *
  1598. * This callback function is called when reqbuf() is called to adjust
  1599. * the buffer count and buffer size
  1600. */
  1601. static int vpfe_queue_setup(struct vb2_queue *vq,
  1602. unsigned int *nbuffers, unsigned int *nplanes,
  1603. unsigned int sizes[], struct device *alloc_devs[])
  1604. {
  1605. struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
  1606. unsigned size = vpfe->fmt.fmt.pix.sizeimage;
  1607. if (vq->num_buffers + *nbuffers < 3)
  1608. *nbuffers = 3 - vq->num_buffers;
  1609. if (*nplanes) {
  1610. if (sizes[0] < size)
  1611. return -EINVAL;
  1612. size = sizes[0];
  1613. }
  1614. *nplanes = 1;
  1615. sizes[0] = size;
  1616. vpfe_dbg(1, vpfe,
  1617. "nbuffers=%d, size=%u\n", *nbuffers, sizes[0]);
  1618. /* Calculate field offset */
  1619. vpfe_calculate_offsets(vpfe);
  1620. return 0;
  1621. }
  1622. /*
  1623. * vpfe_buffer_prepare : callback function for buffer prepare
  1624. * @vb: ptr to vb2_buffer
  1625. *
  1626. * This is the callback function for buffer prepare when vb2_qbuf()
  1627. * function is called. The buffer is prepared and user space virtual address
  1628. * or user address is converted into physical address
  1629. */
  1630. static int vpfe_buffer_prepare(struct vb2_buffer *vb)
  1631. {
  1632. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  1633. struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
  1634. vb2_set_plane_payload(vb, 0, vpfe->fmt.fmt.pix.sizeimage);
  1635. if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
  1636. return -EINVAL;
  1637. vbuf->field = vpfe->fmt.fmt.pix.field;
  1638. return 0;
  1639. }
  1640. /*
  1641. * vpfe_buffer_queue : Callback function to add buffer to DMA queue
  1642. * @vb: ptr to vb2_buffer
  1643. */
  1644. static void vpfe_buffer_queue(struct vb2_buffer *vb)
  1645. {
  1646. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  1647. struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
  1648. struct vpfe_cap_buffer *buf = to_vpfe_buffer(vbuf);
  1649. unsigned long flags = 0;
  1650. /* add the buffer to the DMA queue */
  1651. spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
  1652. list_add_tail(&buf->list, &vpfe->dma_queue);
  1653. spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
  1654. }
  1655. /*
  1656. * vpfe_start_streaming : Starts the DMA engine for streaming
  1657. * @vb: ptr to vb2_buffer
  1658. * @count: number of buffers
  1659. */
  1660. static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
  1661. {
  1662. struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
  1663. struct vpfe_cap_buffer *buf, *tmp;
  1664. struct vpfe_subdev_info *sdinfo;
  1665. unsigned long flags;
  1666. unsigned long addr;
  1667. int ret;
  1668. spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
  1669. vpfe->field = 0;
  1670. vpfe->sequence = 0;
  1671. sdinfo = vpfe->current_subdev;
  1672. vpfe_attach_irq(vpfe);
  1673. if (vpfe->ccdc.ccdc_cfg.if_type == VPFE_RAW_BAYER)
  1674. vpfe_ccdc_config_raw(&vpfe->ccdc);
  1675. else
  1676. vpfe_ccdc_config_ycbcr(&vpfe->ccdc);
  1677. /* Get the next frame from the buffer queue */
  1678. vpfe->next_frm = list_entry(vpfe->dma_queue.next,
  1679. struct vpfe_cap_buffer, list);
  1680. vpfe->cur_frm = vpfe->next_frm;
  1681. /* Remove buffer from the buffer queue */
  1682. list_del(&vpfe->cur_frm->list);
  1683. spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
  1684. addr = vb2_dma_contig_plane_dma_addr(&vpfe->cur_frm->vb.vb2_buf, 0);
  1685. vpfe_set_sdr_addr(&vpfe->ccdc, (unsigned long)(addr));
  1686. vpfe_pcr_enable(&vpfe->ccdc, 1);
  1687. ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 1);
  1688. if (ret < 0) {
  1689. vpfe_err(vpfe, "Error in attaching interrupt handle\n");
  1690. goto err;
  1691. }
  1692. return 0;
  1693. err:
  1694. list_for_each_entry_safe(buf, tmp, &vpfe->dma_queue, list) {
  1695. list_del(&buf->list);
  1696. vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
  1697. }
  1698. return ret;
  1699. }
  1700. /*
  1701. * vpfe_stop_streaming : Stop the DMA engine
  1702. * @vq: ptr to vb2_queue
  1703. *
  1704. * This callback stops the DMA engine and any remaining buffers
  1705. * in the DMA queue are released.
  1706. */
  1707. static void vpfe_stop_streaming(struct vb2_queue *vq)
  1708. {
  1709. struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
  1710. struct vpfe_subdev_info *sdinfo;
  1711. unsigned long flags;
  1712. int ret;
  1713. vpfe_pcr_enable(&vpfe->ccdc, 0);
  1714. vpfe_detach_irq(vpfe);
  1715. sdinfo = vpfe->current_subdev;
  1716. ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 0);
  1717. if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
  1718. vpfe_dbg(1, vpfe, "stream off failed in subdev\n");
  1719. /* release all active buffers */
  1720. spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
  1721. if (vpfe->cur_frm == vpfe->next_frm) {
  1722. vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
  1723. VB2_BUF_STATE_ERROR);
  1724. } else {
  1725. if (vpfe->cur_frm != NULL)
  1726. vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
  1727. VB2_BUF_STATE_ERROR);
  1728. if (vpfe->next_frm != NULL)
  1729. vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
  1730. VB2_BUF_STATE_ERROR);
  1731. }
  1732. while (!list_empty(&vpfe->dma_queue)) {
  1733. vpfe->next_frm = list_entry(vpfe->dma_queue.next,
  1734. struct vpfe_cap_buffer, list);
  1735. list_del(&vpfe->next_frm->list);
  1736. vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
  1737. VB2_BUF_STATE_ERROR);
  1738. }
  1739. spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
  1740. }
  1741. static int vpfe_cropcap(struct file *file, void *priv,
  1742. struct v4l2_cropcap *crop)
  1743. {
  1744. struct vpfe_device *vpfe = video_drvdata(file);
  1745. vpfe_dbg(2, vpfe, "vpfe_cropcap\n");
  1746. if (vpfe->std_index >= ARRAY_SIZE(vpfe_standards))
  1747. return -EINVAL;
  1748. memset(crop, 0, sizeof(struct v4l2_cropcap));
  1749. crop->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  1750. crop->defrect.width = vpfe_standards[vpfe->std_index].width;
  1751. crop->bounds.width = crop->defrect.width;
  1752. crop->defrect.height = vpfe_standards[vpfe->std_index].height;
  1753. crop->bounds.height = crop->defrect.height;
  1754. crop->pixelaspect = vpfe_standards[vpfe->std_index].pixelaspect;
  1755. return 0;
  1756. }
  1757. static int
  1758. vpfe_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
  1759. {
  1760. struct vpfe_device *vpfe = video_drvdata(file);
  1761. switch (s->target) {
  1762. case V4L2_SEL_TGT_CROP_BOUNDS:
  1763. case V4L2_SEL_TGT_CROP_DEFAULT:
  1764. s->r.left = s->r.top = 0;
  1765. s->r.width = vpfe->crop.width;
  1766. s->r.height = vpfe->crop.height;
  1767. break;
  1768. case V4L2_SEL_TGT_CROP:
  1769. s->r = vpfe->crop;
  1770. break;
  1771. default:
  1772. return -EINVAL;
  1773. }
  1774. return 0;
  1775. }
  1776. static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
  1777. {
  1778. if (a->left < b->left || a->top < b->top)
  1779. return 0;
  1780. if (a->left + a->width > b->left + b->width)
  1781. return 0;
  1782. if (a->top + a->height > b->top + b->height)
  1783. return 0;
  1784. return 1;
  1785. }
  1786. static int
  1787. vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
  1788. {
  1789. struct vpfe_device *vpfe = video_drvdata(file);
  1790. struct v4l2_rect cr = vpfe->crop;
  1791. struct v4l2_rect r = s->r;
  1792. /* If streaming is started, return error */
  1793. if (vb2_is_busy(&vpfe->buffer_queue)) {
  1794. vpfe_err(vpfe, "%s device busy\n", __func__);
  1795. return -EBUSY;
  1796. }
  1797. if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
  1798. s->target != V4L2_SEL_TGT_CROP)
  1799. return -EINVAL;
  1800. v4l_bound_align_image(&r.width, 0, cr.width, 0,
  1801. &r.height, 0, cr.height, 0, 0);
  1802. r.left = clamp_t(unsigned int, r.left, 0, cr.width - r.width);
  1803. r.top = clamp_t(unsigned int, r.top, 0, cr.height - r.height);
  1804. if (s->flags & V4L2_SEL_FLAG_LE && !enclosed_rectangle(&r, &s->r))
  1805. return -ERANGE;
  1806. if (s->flags & V4L2_SEL_FLAG_GE && !enclosed_rectangle(&s->r, &r))
  1807. return -ERANGE;
  1808. s->r = vpfe->crop = r;
  1809. vpfe_ccdc_set_image_window(&vpfe->ccdc, &r, vpfe->bpp);
  1810. vpfe->fmt.fmt.pix.width = r.width;
  1811. vpfe->fmt.fmt.pix.height = r.height;
  1812. vpfe->fmt.fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
  1813. vpfe->fmt.fmt.pix.sizeimage = vpfe->fmt.fmt.pix.bytesperline *
  1814. vpfe->fmt.fmt.pix.height;
  1815. vpfe_dbg(1, vpfe, "cropped (%d,%d)/%dx%d of %dx%d\n",
  1816. r.left, r.top, r.width, r.height, cr.width, cr.height);
  1817. return 0;
  1818. }
  1819. static long vpfe_ioctl_default(struct file *file, void *priv,
  1820. bool valid_prio, unsigned int cmd, void *param)
  1821. {
  1822. struct vpfe_device *vpfe = video_drvdata(file);
  1823. int ret;
  1824. vpfe_dbg(2, vpfe, "vpfe_ioctl_default\n");
  1825. if (!valid_prio) {
  1826. vpfe_err(vpfe, "%s device busy\n", __func__);
  1827. return -EBUSY;
  1828. }
  1829. /* If streaming is started, return error */
  1830. if (vb2_is_busy(&vpfe->buffer_queue)) {
  1831. vpfe_err(vpfe, "%s device busy\n", __func__);
  1832. return -EBUSY;
  1833. }
  1834. switch (cmd) {
  1835. case VIDIOC_AM437X_CCDC_CFG:
  1836. ret = vpfe_ccdc_set_params(&vpfe->ccdc, (void __user *)param);
  1837. if (ret) {
  1838. vpfe_dbg(2, vpfe,
  1839. "Error setting parameters in CCDC\n");
  1840. return ret;
  1841. }
  1842. ret = vpfe_get_ccdc_image_format(vpfe,
  1843. &vpfe->fmt);
  1844. if (ret < 0) {
  1845. vpfe_dbg(2, vpfe,
  1846. "Invalid image format at CCDC\n");
  1847. return ret;
  1848. }
  1849. break;
  1850. default:
  1851. ret = -ENOTTY;
  1852. break;
  1853. }
  1854. return ret;
  1855. }
  1856. static const struct vb2_ops vpfe_video_qops = {
  1857. .wait_prepare = vb2_ops_wait_prepare,
  1858. .wait_finish = vb2_ops_wait_finish,
  1859. .queue_setup = vpfe_queue_setup,
  1860. .buf_prepare = vpfe_buffer_prepare,
  1861. .buf_queue = vpfe_buffer_queue,
  1862. .start_streaming = vpfe_start_streaming,
  1863. .stop_streaming = vpfe_stop_streaming,
  1864. };
  1865. /* vpfe capture driver file operations */
  1866. static const struct v4l2_file_operations vpfe_fops = {
  1867. .owner = THIS_MODULE,
  1868. .open = vpfe_open,
  1869. .release = vpfe_release,
  1870. .read = vb2_fop_read,
  1871. .poll = vb2_fop_poll,
  1872. .unlocked_ioctl = video_ioctl2,
  1873. .mmap = vb2_fop_mmap,
  1874. };
  1875. /* vpfe capture ioctl operations */
  1876. static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
  1877. .vidioc_querycap = vpfe_querycap,
  1878. .vidioc_enum_fmt_vid_cap = vpfe_enum_fmt,
  1879. .vidioc_g_fmt_vid_cap = vpfe_g_fmt,
  1880. .vidioc_s_fmt_vid_cap = vpfe_s_fmt,
  1881. .vidioc_try_fmt_vid_cap = vpfe_try_fmt,
  1882. .vidioc_enum_framesizes = vpfe_enum_size,
  1883. .vidioc_enum_input = vpfe_enum_input,
  1884. .vidioc_g_input = vpfe_g_input,
  1885. .vidioc_s_input = vpfe_s_input,
  1886. .vidioc_querystd = vpfe_querystd,
  1887. .vidioc_s_std = vpfe_s_std,
  1888. .vidioc_g_std = vpfe_g_std,
  1889. .vidioc_reqbufs = vb2_ioctl_reqbufs,
  1890. .vidioc_create_bufs = vb2_ioctl_create_bufs,
  1891. .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
  1892. .vidioc_querybuf = vb2_ioctl_querybuf,
  1893. .vidioc_qbuf = vb2_ioctl_qbuf,
  1894. .vidioc_dqbuf = vb2_ioctl_dqbuf,
  1895. .vidioc_expbuf = vb2_ioctl_expbuf,
  1896. .vidioc_streamon = vb2_ioctl_streamon,
  1897. .vidioc_streamoff = vb2_ioctl_streamoff,
  1898. .vidioc_log_status = v4l2_ctrl_log_status,
  1899. .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
  1900. .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
  1901. .vidioc_cropcap = vpfe_cropcap,
  1902. .vidioc_g_selection = vpfe_g_selection,
  1903. .vidioc_s_selection = vpfe_s_selection,
  1904. .vidioc_default = vpfe_ioctl_default,
  1905. };
  1906. static int
  1907. vpfe_async_bound(struct v4l2_async_notifier *notifier,
  1908. struct v4l2_subdev *subdev,
  1909. struct v4l2_async_subdev *asd)
  1910. {
  1911. struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
  1912. struct vpfe_device, v4l2_dev);
  1913. struct v4l2_subdev_mbus_code_enum mbus_code;
  1914. struct vpfe_subdev_info *sdinfo;
  1915. bool found = false;
  1916. int i, j;
  1917. vpfe_dbg(1, vpfe, "vpfe_async_bound\n");
  1918. for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
  1919. if (vpfe->cfg->asd[i]->match.of.node == asd[i].match.of.node) {
  1920. sdinfo = &vpfe->cfg->sub_devs[i];
  1921. vpfe->sd[i] = subdev;
  1922. vpfe->sd[i]->grp_id = sdinfo->grp_id;
  1923. found = true;
  1924. break;
  1925. }
  1926. }
  1927. if (!found) {
  1928. vpfe_info(vpfe, "sub device (%s) not matched\n", subdev->name);
  1929. return -EINVAL;
  1930. }
  1931. vpfe->video_dev.tvnorms |= sdinfo->inputs[0].std;
  1932. /* setup the supported formats & indexes */
  1933. for (j = 0, i = 0; ; ++j) {
  1934. struct vpfe_fmt *fmt;
  1935. int ret;
  1936. memset(&mbus_code, 0, sizeof(mbus_code));
  1937. mbus_code.index = j;
  1938. mbus_code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
  1939. ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
  1940. NULL, &mbus_code);
  1941. if (ret)
  1942. break;
  1943. fmt = find_format_by_code(mbus_code.code);
  1944. if (!fmt)
  1945. continue;
  1946. fmt->supported = true;
  1947. fmt->index = i++;
  1948. }
  1949. return 0;
  1950. }
  1951. static int vpfe_probe_complete(struct vpfe_device *vpfe)
  1952. {
  1953. struct video_device *vdev;
  1954. struct vb2_queue *q;
  1955. int err;
  1956. spin_lock_init(&vpfe->dma_queue_lock);
  1957. mutex_init(&vpfe->lock);
  1958. vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  1959. /* set first sub device as current one */
  1960. vpfe->current_subdev = &vpfe->cfg->sub_devs[0];
  1961. vpfe->v4l2_dev.ctrl_handler = vpfe->sd[0]->ctrl_handler;
  1962. err = vpfe_set_input(vpfe, 0);
  1963. if (err)
  1964. goto probe_out;
  1965. /* Initialize videobuf2 queue as per the buffer type */
  1966. q = &vpfe->buffer_queue;
  1967. q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  1968. q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
  1969. q->drv_priv = vpfe;
  1970. q->ops = &vpfe_video_qops;
  1971. q->mem_ops = &vb2_dma_contig_memops;
  1972. q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
  1973. q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
  1974. q->lock = &vpfe->lock;
  1975. q->min_buffers_needed = 1;
  1976. q->dev = vpfe->pdev;
  1977. err = vb2_queue_init(q);
  1978. if (err) {
  1979. vpfe_err(vpfe, "vb2_queue_init() failed\n");
  1980. goto probe_out;
  1981. }
  1982. INIT_LIST_HEAD(&vpfe->dma_queue);
  1983. vdev = &vpfe->video_dev;
  1984. strlcpy(vdev->name, VPFE_MODULE_NAME, sizeof(vdev->name));
  1985. vdev->release = video_device_release_empty;
  1986. vdev->fops = &vpfe_fops;
  1987. vdev->ioctl_ops = &vpfe_ioctl_ops;
  1988. vdev->v4l2_dev = &vpfe->v4l2_dev;
  1989. vdev->vfl_dir = VFL_DIR_RX;
  1990. vdev->queue = q;
  1991. vdev->lock = &vpfe->lock;
  1992. video_set_drvdata(vdev, vpfe);
  1993. err = video_register_device(&vpfe->video_dev, VFL_TYPE_GRABBER, -1);
  1994. if (err) {
  1995. vpfe_err(vpfe,
  1996. "Unable to register video device.\n");
  1997. goto probe_out;
  1998. }
  1999. return 0;
  2000. probe_out:
  2001. v4l2_device_unregister(&vpfe->v4l2_dev);
  2002. return err;
  2003. }
  2004. static int vpfe_async_complete(struct v4l2_async_notifier *notifier)
  2005. {
  2006. struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
  2007. struct vpfe_device, v4l2_dev);
  2008. return vpfe_probe_complete(vpfe);
  2009. }
  2010. static struct vpfe_config *
  2011. vpfe_get_pdata(struct platform_device *pdev)
  2012. {
  2013. struct device_node *endpoint = NULL;
  2014. struct v4l2_of_endpoint bus_cfg;
  2015. struct vpfe_subdev_info *sdinfo;
  2016. struct vpfe_config *pdata;
  2017. unsigned int flags;
  2018. unsigned int i;
  2019. int err;
  2020. dev_dbg(&pdev->dev, "vpfe_get_pdata\n");
  2021. if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
  2022. return pdev->dev.platform_data;
  2023. pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
  2024. if (!pdata)
  2025. return NULL;
  2026. for (i = 0; ; i++) {
  2027. struct device_node *rem;
  2028. endpoint = of_graph_get_next_endpoint(pdev->dev.of_node,
  2029. endpoint);
  2030. if (!endpoint)
  2031. break;
  2032. sdinfo = &pdata->sub_devs[i];
  2033. sdinfo->grp_id = 0;
  2034. /* we only support camera */
  2035. sdinfo->inputs[0].index = i;
  2036. strcpy(sdinfo->inputs[0].name, "Camera");
  2037. sdinfo->inputs[0].type = V4L2_INPUT_TYPE_CAMERA;
  2038. sdinfo->inputs[0].std = V4L2_STD_ALL;
  2039. sdinfo->inputs[0].capabilities = V4L2_IN_CAP_STD;
  2040. sdinfo->can_route = 0;
  2041. sdinfo->routes = NULL;
  2042. of_property_read_u32(endpoint, "ti,am437x-vpfe-interface",
  2043. &sdinfo->vpfe_param.if_type);
  2044. if (sdinfo->vpfe_param.if_type < 0 ||
  2045. sdinfo->vpfe_param.if_type > 4) {
  2046. sdinfo->vpfe_param.if_type = VPFE_RAW_BAYER;
  2047. }
  2048. err = v4l2_of_parse_endpoint(endpoint, &bus_cfg);
  2049. if (err) {
  2050. dev_err(&pdev->dev, "Could not parse the endpoint\n");
  2051. goto done;
  2052. }
  2053. sdinfo->vpfe_param.bus_width = bus_cfg.bus.parallel.bus_width;
  2054. if (sdinfo->vpfe_param.bus_width < 8 ||
  2055. sdinfo->vpfe_param.bus_width > 16) {
  2056. dev_err(&pdev->dev, "Invalid bus width.\n");
  2057. goto done;
  2058. }
  2059. flags = bus_cfg.bus.parallel.flags;
  2060. if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
  2061. sdinfo->vpfe_param.hdpol = 1;
  2062. if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
  2063. sdinfo->vpfe_param.vdpol = 1;
  2064. rem = of_graph_get_remote_port_parent(endpoint);
  2065. if (!rem) {
  2066. dev_err(&pdev->dev, "Remote device at %s not found\n",
  2067. endpoint->full_name);
  2068. goto done;
  2069. }
  2070. pdata->asd[i] = devm_kzalloc(&pdev->dev,
  2071. sizeof(struct v4l2_async_subdev),
  2072. GFP_KERNEL);
  2073. if (!pdata->asd[i]) {
  2074. of_node_put(rem);
  2075. pdata = NULL;
  2076. goto done;
  2077. }
  2078. pdata->asd[i]->match_type = V4L2_ASYNC_MATCH_OF;
  2079. pdata->asd[i]->match.of.node = rem;
  2080. of_node_put(rem);
  2081. }
  2082. of_node_put(endpoint);
  2083. return pdata;
  2084. done:
  2085. of_node_put(endpoint);
  2086. return NULL;
  2087. }
  2088. /*
  2089. * vpfe_probe : This function creates device entries by register
  2090. * itself to the V4L2 driver and initializes fields of each
  2091. * device objects
  2092. */
  2093. static int vpfe_probe(struct platform_device *pdev)
  2094. {
  2095. struct vpfe_config *vpfe_cfg = vpfe_get_pdata(pdev);
  2096. struct vpfe_device *vpfe;
  2097. struct vpfe_ccdc *ccdc;
  2098. struct resource *res;
  2099. int ret;
  2100. if (!vpfe_cfg) {
  2101. dev_err(&pdev->dev, "No platform data\n");
  2102. return -EINVAL;
  2103. }
  2104. vpfe = devm_kzalloc(&pdev->dev, sizeof(*vpfe), GFP_KERNEL);
  2105. if (!vpfe)
  2106. return -ENOMEM;
  2107. vpfe->pdev = &pdev->dev;
  2108. vpfe->cfg = vpfe_cfg;
  2109. ccdc = &vpfe->ccdc;
  2110. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  2111. ccdc->ccdc_cfg.base_addr = devm_ioremap_resource(&pdev->dev, res);
  2112. if (IS_ERR(ccdc->ccdc_cfg.base_addr))
  2113. return PTR_ERR(ccdc->ccdc_cfg.base_addr);
  2114. ret = platform_get_irq(pdev, 0);
  2115. if (ret <= 0) {
  2116. dev_err(&pdev->dev, "No IRQ resource\n");
  2117. return -ENODEV;
  2118. }
  2119. vpfe->irq = ret;
  2120. ret = devm_request_irq(vpfe->pdev, vpfe->irq, vpfe_isr, 0,
  2121. "vpfe_capture0", vpfe);
  2122. if (ret) {
  2123. dev_err(&pdev->dev, "Unable to request interrupt\n");
  2124. return -EINVAL;
  2125. }
  2126. ret = v4l2_device_register(&pdev->dev, &vpfe->v4l2_dev);
  2127. if (ret) {
  2128. vpfe_err(vpfe,
  2129. "Unable to register v4l2 device.\n");
  2130. return ret;
  2131. }
  2132. /* set the driver data in platform device */
  2133. platform_set_drvdata(pdev, vpfe);
  2134. /* Enabling module functional clock */
  2135. pm_runtime_enable(&pdev->dev);
  2136. /* for now just enable it here instead of waiting for the open */
  2137. pm_runtime_get_sync(&pdev->dev);
  2138. vpfe_ccdc_config_defaults(ccdc);
  2139. pm_runtime_put_sync(&pdev->dev);
  2140. vpfe->sd = devm_kzalloc(&pdev->dev, sizeof(struct v4l2_subdev *) *
  2141. ARRAY_SIZE(vpfe->cfg->asd), GFP_KERNEL);
  2142. if (!vpfe->sd) {
  2143. ret = -ENOMEM;
  2144. goto probe_out_v4l2_unregister;
  2145. }
  2146. vpfe->notifier.subdevs = vpfe->cfg->asd;
  2147. vpfe->notifier.num_subdevs = ARRAY_SIZE(vpfe->cfg->asd);
  2148. vpfe->notifier.bound = vpfe_async_bound;
  2149. vpfe->notifier.complete = vpfe_async_complete;
  2150. ret = v4l2_async_notifier_register(&vpfe->v4l2_dev,
  2151. &vpfe->notifier);
  2152. if (ret) {
  2153. vpfe_err(vpfe, "Error registering async notifier\n");
  2154. ret = -EINVAL;
  2155. goto probe_out_v4l2_unregister;
  2156. }
  2157. return 0;
  2158. probe_out_v4l2_unregister:
  2159. v4l2_device_unregister(&vpfe->v4l2_dev);
  2160. return ret;
  2161. }
  2162. /*
  2163. * vpfe_remove : It un-register device from V4L2 driver
  2164. */
  2165. static int vpfe_remove(struct platform_device *pdev)
  2166. {
  2167. struct vpfe_device *vpfe = platform_get_drvdata(pdev);
  2168. vpfe_dbg(2, vpfe, "vpfe_remove\n");
  2169. pm_runtime_disable(&pdev->dev);
  2170. v4l2_async_notifier_unregister(&vpfe->notifier);
  2171. v4l2_device_unregister(&vpfe->v4l2_dev);
  2172. video_unregister_device(&vpfe->video_dev);
  2173. return 0;
  2174. }
  2175. #ifdef CONFIG_PM_SLEEP
  2176. static void vpfe_save_context(struct vpfe_ccdc *ccdc)
  2177. {
  2178. ccdc->ccdc_ctx[VPFE_PCR >> 2] = vpfe_reg_read(ccdc, VPFE_PCR);
  2179. ccdc->ccdc_ctx[VPFE_SYNMODE >> 2] = vpfe_reg_read(ccdc, VPFE_SYNMODE);
  2180. ccdc->ccdc_ctx[VPFE_SDOFST >> 2] = vpfe_reg_read(ccdc, VPFE_SDOFST);
  2181. ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2] = vpfe_reg_read(ccdc, VPFE_SDR_ADDR);
  2182. ccdc->ccdc_ctx[VPFE_CLAMP >> 2] = vpfe_reg_read(ccdc, VPFE_CLAMP);
  2183. ccdc->ccdc_ctx[VPFE_DCSUB >> 2] = vpfe_reg_read(ccdc, VPFE_DCSUB);
  2184. ccdc->ccdc_ctx[VPFE_COLPTN >> 2] = vpfe_reg_read(ccdc, VPFE_COLPTN);
  2185. ccdc->ccdc_ctx[VPFE_BLKCMP >> 2] = vpfe_reg_read(ccdc, VPFE_BLKCMP);
  2186. ccdc->ccdc_ctx[VPFE_VDINT >> 2] = vpfe_reg_read(ccdc, VPFE_VDINT);
  2187. ccdc->ccdc_ctx[VPFE_ALAW >> 2] = vpfe_reg_read(ccdc, VPFE_ALAW);
  2188. ccdc->ccdc_ctx[VPFE_REC656IF >> 2] = vpfe_reg_read(ccdc, VPFE_REC656IF);
  2189. ccdc->ccdc_ctx[VPFE_CCDCFG >> 2] = vpfe_reg_read(ccdc, VPFE_CCDCFG);
  2190. ccdc->ccdc_ctx[VPFE_CULLING >> 2] = vpfe_reg_read(ccdc, VPFE_CULLING);
  2191. ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2] = vpfe_reg_read(ccdc,
  2192. VPFE_HD_VD_WID);
  2193. ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2] = vpfe_reg_read(ccdc,
  2194. VPFE_PIX_LINES);
  2195. ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2] = vpfe_reg_read(ccdc,
  2196. VPFE_HORZ_INFO);
  2197. ccdc->ccdc_ctx[VPFE_VERT_START >> 2] = vpfe_reg_read(ccdc,
  2198. VPFE_VERT_START);
  2199. ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2] = vpfe_reg_read(ccdc,
  2200. VPFE_VERT_LINES);
  2201. ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2] = vpfe_reg_read(ccdc,
  2202. VPFE_HSIZE_OFF);
  2203. }
  2204. static int vpfe_suspend(struct device *dev)
  2205. {
  2206. struct platform_device *pdev = to_platform_device(dev);
  2207. struct vpfe_device *vpfe = platform_get_drvdata(pdev);
  2208. struct vpfe_ccdc *ccdc = &vpfe->ccdc;
  2209. /* if streaming has not started we don't care */
  2210. if (!vb2_start_streaming_called(&vpfe->buffer_queue))
  2211. return 0;
  2212. pm_runtime_get_sync(dev);
  2213. vpfe_config_enable(ccdc, 1);
  2214. /* Save VPFE context */
  2215. vpfe_save_context(ccdc);
  2216. /* Disable CCDC */
  2217. vpfe_pcr_enable(ccdc, 0);
  2218. vpfe_config_enable(ccdc, 0);
  2219. /* Disable both master and slave clock */
  2220. pm_runtime_put_sync(dev);
  2221. /* Select sleep pin state */
  2222. pinctrl_pm_select_sleep_state(dev);
  2223. return 0;
  2224. }
  2225. static void vpfe_restore_context(struct vpfe_ccdc *ccdc)
  2226. {
  2227. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SYNMODE >> 2], VPFE_SYNMODE);
  2228. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CULLING >> 2], VPFE_CULLING);
  2229. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDOFST >> 2], VPFE_SDOFST);
  2230. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2], VPFE_SDR_ADDR);
  2231. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CLAMP >> 2], VPFE_CLAMP);
  2232. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_DCSUB >> 2], VPFE_DCSUB);
  2233. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_COLPTN >> 2], VPFE_COLPTN);
  2234. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_BLKCMP >> 2], VPFE_BLKCMP);
  2235. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VDINT >> 2], VPFE_VDINT);
  2236. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_ALAW >> 2], VPFE_ALAW);
  2237. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_REC656IF >> 2], VPFE_REC656IF);
  2238. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CCDCFG >> 2], VPFE_CCDCFG);
  2239. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PCR >> 2], VPFE_PCR);
  2240. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2],
  2241. VPFE_HD_VD_WID);
  2242. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2],
  2243. VPFE_PIX_LINES);
  2244. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2],
  2245. VPFE_HORZ_INFO);
  2246. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_START >> 2],
  2247. VPFE_VERT_START);
  2248. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2],
  2249. VPFE_VERT_LINES);
  2250. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2],
  2251. VPFE_HSIZE_OFF);
  2252. }
  2253. static int vpfe_resume(struct device *dev)
  2254. {
  2255. struct platform_device *pdev = to_platform_device(dev);
  2256. struct vpfe_device *vpfe = platform_get_drvdata(pdev);
  2257. struct vpfe_ccdc *ccdc = &vpfe->ccdc;
  2258. /* if streaming has not started we don't care */
  2259. if (!vb2_start_streaming_called(&vpfe->buffer_queue))
  2260. return 0;
  2261. /* Enable both master and slave clock */
  2262. pm_runtime_get_sync(dev);
  2263. vpfe_config_enable(ccdc, 1);
  2264. /* Restore VPFE context */
  2265. vpfe_restore_context(ccdc);
  2266. vpfe_config_enable(ccdc, 0);
  2267. pm_runtime_put_sync(dev);
  2268. /* Select default pin state */
  2269. pinctrl_pm_select_default_state(dev);
  2270. return 0;
  2271. }
  2272. #endif
  2273. static SIMPLE_DEV_PM_OPS(vpfe_pm_ops, vpfe_suspend, vpfe_resume);
  2274. static const struct of_device_id vpfe_of_match[] = {
  2275. { .compatible = "ti,am437x-vpfe", },
  2276. { /* sentinel */ },
  2277. };
  2278. MODULE_DEVICE_TABLE(of, vpfe_of_match);
  2279. static struct platform_driver vpfe_driver = {
  2280. .probe = vpfe_probe,
  2281. .remove = vpfe_remove,
  2282. .driver = {
  2283. .name = VPFE_MODULE_NAME,
  2284. .pm = &vpfe_pm_ops,
  2285. .of_match_table = of_match_ptr(vpfe_of_match),
  2286. },
  2287. };
  2288. module_platform_driver(vpfe_driver);
  2289. MODULE_AUTHOR("Texas Instruments");
  2290. MODULE_DESCRIPTION("TI AM437x VPFE driver");
  2291. MODULE_LICENSE("GPL");
  2292. MODULE_VERSION(VPFE_VERSION);