adf.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188
  1. /*
  2. * Copyright (C) 2013 Google, Inc.
  3. * adf_modeinfo_{set_name,set_vrefresh} modified from
  4. * drivers/gpu/drm/drm_modes.c
  5. * adf_format_validate_yuv modified from framebuffer_check in
  6. * drivers/gpu/drm/drm_crtc.c
  7. *
  8. * This software is licensed under the terms of the GNU General Public
  9. * License version 2, as published by the Free Software Foundation, and
  10. * may be copied, distributed, and modified under those terms.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. */
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <video/adf_format.h>

#include "sw_sync.h"
#include "sync.h"
#include "adf.h"
#include "adf_fops.h"
#include "adf_sysfs.h"
#define CREATE_TRACE_POINTS
#include "adf_trace.h"
  33. #define ADF_SHORT_FENCE_TIMEOUT (1 * MSEC_PER_SEC)
  34. #define ADF_LONG_FENCE_TIMEOUT (10 * MSEC_PER_SEC)
  35. static DEFINE_IDR(adf_devices);
  36. static void adf_fence_wait(struct adf_device *dev, struct sync_fence *fence)
  37. {
  38. /* sync_fence_wait() dumps debug information on timeout. Experience
  39. has shown that if the pipeline gets stuck, a short timeout followed
  40. by a longer one provides useful information for debugging. */
  41. int err = sync_fence_wait(fence, ADF_SHORT_FENCE_TIMEOUT);
  42. if (err >= 0)
  43. return;
  44. if (err == -ETIME)
  45. err = sync_fence_wait(fence, ADF_LONG_FENCE_TIMEOUT);
  46. if (err < 0)
  47. dev_warn(&dev->base.dev, "error waiting on fence: %d\n", err);
  48. }
  49. void adf_buffer_cleanup(struct adf_buffer *buf)
  50. {
  51. size_t i;
  52. for (i = 0; i < ARRAY_SIZE(buf->dma_bufs); i++)
  53. if (buf->dma_bufs[i])
  54. dma_buf_put(buf->dma_bufs[i]);
  55. if (buf->acquire_fence)
  56. sync_fence_put(buf->acquire_fence);
  57. }
  58. void adf_buffer_mapping_cleanup(struct adf_buffer_mapping *mapping,
  59. struct adf_buffer *buf)
  60. {
  61. /* calling adf_buffer_mapping_cleanup() is safe even if mapping is
  62. uninitialized or partially-initialized, as long as it was
  63. zeroed on allocation */
  64. size_t i;
  65. for (i = 0; i < ARRAY_SIZE(mapping->sg_tables); i++) {
  66. if (mapping->sg_tables[i])
  67. dma_buf_unmap_attachment(mapping->attachments[i],
  68. mapping->sg_tables[i], DMA_TO_DEVICE);
  69. if (mapping->attachments[i])
  70. dma_buf_detach(buf->dma_bufs[i],
  71. mapping->attachments[i]);
  72. }
  73. }
  74. void adf_post_cleanup(struct adf_device *dev, struct adf_pending_post *post)
  75. {
  76. size_t i;
  77. if (post->state)
  78. dev->ops->state_free(dev, post->state);
  79. for (i = 0; i < post->config.n_bufs; i++) {
  80. adf_buffer_mapping_cleanup(&post->config.mappings[i],
  81. &post->config.bufs[i]);
  82. adf_buffer_cleanup(&post->config.bufs[i]);
  83. }
  84. kfree(post->config.custom_data);
  85. kfree(post->config.mappings);
  86. kfree(post->config.bufs);
  87. kfree(post);
  88. }
  89. static void adf_sw_advance_timeline(struct adf_device *dev)
  90. {
  91. #ifdef CONFIG_SW_SYNC
  92. sw_sync_timeline_inc(dev->timeline, 1);
  93. #else
  94. BUG();
  95. #endif
  96. }
  97. static void adf_post_work_func(struct kthread_work *work)
  98. {
  99. struct adf_device *dev =
  100. container_of(work, struct adf_device, post_work);
  101. struct adf_pending_post *post, *next;
  102. struct list_head saved_list;
  103. mutex_lock(&dev->post_lock);
  104. memcpy(&saved_list, &dev->post_list, sizeof(saved_list));
  105. list_replace_init(&dev->post_list, &saved_list);
  106. mutex_unlock(&dev->post_lock);
  107. list_for_each_entry_safe(post, next, &saved_list, head) {
  108. int i;
  109. for (i = 0; i < post->config.n_bufs; i++) {
  110. struct sync_fence *fence =
  111. post->config.bufs[i].acquire_fence;
  112. if (fence)
  113. adf_fence_wait(dev, fence);
  114. }
  115. dev->ops->post(dev, &post->config, post->state);
  116. if (dev->ops->advance_timeline)
  117. dev->ops->advance_timeline(dev, &post->config,
  118. post->state);
  119. else
  120. adf_sw_advance_timeline(dev);
  121. list_del(&post->head);
  122. if (dev->onscreen)
  123. adf_post_cleanup(dev, dev->onscreen);
  124. dev->onscreen = post;
  125. }
  126. }
  127. void adf_attachment_free(struct adf_attachment_list *attachment)
  128. {
  129. list_del(&attachment->head);
  130. kfree(attachment);
  131. }
  132. struct adf_event_refcount *adf_obj_find_event_refcount(struct adf_obj *obj,
  133. enum adf_event_type type)
  134. {
  135. struct rb_root *root = &obj->event_refcount;
  136. struct rb_node **new = &(root->rb_node);
  137. struct rb_node *parent = NULL;
  138. struct adf_event_refcount *refcount;
  139. while (*new) {
  140. refcount = container_of(*new, struct adf_event_refcount, node);
  141. parent = *new;
  142. if (refcount->type > type)
  143. new = &(*new)->rb_left;
  144. else if (refcount->type < type)
  145. new = &(*new)->rb_right;
  146. else
  147. return refcount;
  148. }
  149. refcount = kzalloc(sizeof(*refcount), GFP_KERNEL);
  150. if (!refcount)
  151. return NULL;
  152. refcount->type = type;
  153. rb_link_node(&refcount->node, parent, new);
  154. rb_insert_color(&refcount->node, root);
  155. return refcount;
  156. }
  157. /**
  158. * adf_event_get - increase the refcount for an event
  159. *
  160. * @obj: the object that produces the event
  161. * @type: the event type
  162. *
  163. * ADF will call the object's set_event() op if needed. ops are allowed
  164. * to sleep, so adf_event_get() must NOT be called from an atomic context.
  165. *
  166. * Returns 0 if successful, or -%EINVAL if the object does not support the
  167. * requested event type.
  168. */
  169. int adf_event_get(struct adf_obj *obj, enum adf_event_type type)
  170. {
  171. struct adf_event_refcount *refcount;
  172. int old_refcount;
  173. int ret;
  174. ret = adf_obj_check_supports_event(obj, type);
  175. if (ret < 0)
  176. return ret;
  177. mutex_lock(&obj->event_lock);
  178. refcount = adf_obj_find_event_refcount(obj, type);
  179. if (!refcount) {
  180. ret = -ENOMEM;
  181. goto done;
  182. }
  183. old_refcount = refcount->refcount++;
  184. if (old_refcount == 0) {
  185. obj->ops->set_event(obj, type, true);
  186. trace_adf_event_enable(obj, type);
  187. }
  188. done:
  189. mutex_unlock(&obj->event_lock);
  190. return ret;
  191. }
  192. EXPORT_SYMBOL(adf_event_get);
  193. /**
  194. * adf_event_put - decrease the refcount for an event
  195. *
  196. * @obj: the object that produces the event
  197. * @type: the event type
  198. *
  199. * ADF will call the object's set_event() op if needed. ops are allowed
  200. * to sleep, so adf_event_put() must NOT be called from an atomic context.
  201. *
  202. * Returns 0 if successful, -%EINVAL if the object does not support the
  203. * requested event type, or -%EALREADY if the refcount is already 0.
  204. */
  205. int adf_event_put(struct adf_obj *obj, enum adf_event_type type)
  206. {
  207. struct adf_event_refcount *refcount;
  208. int old_refcount;
  209. int ret;
  210. ret = adf_obj_check_supports_event(obj, type);
  211. if (ret < 0)
  212. return ret;
  213. mutex_lock(&obj->event_lock);
  214. refcount = adf_obj_find_event_refcount(obj, type);
  215. if (!refcount) {
  216. ret = -ENOMEM;
  217. goto done;
  218. }
  219. old_refcount = refcount->refcount--;
  220. if (WARN_ON(old_refcount == 0)) {
  221. refcount->refcount++;
  222. ret = -EALREADY;
  223. } else if (old_refcount == 1) {
  224. obj->ops->set_event(obj, type, false);
  225. trace_adf_event_disable(obj, type);
  226. }
  227. done:
  228. mutex_unlock(&obj->event_lock);
  229. return ret;
  230. }
  231. EXPORT_SYMBOL(adf_event_put);
  232. /**
  233. * adf_vsync_wait - wait for a vsync event on a display interface
  234. *
  235. * @intf: the display interface
  236. * @timeout: timeout in jiffies (0 = wait indefinitely)
  237. *
  238. * adf_vsync_wait() may sleep, so it must NOT be called from an atomic context.
  239. *
  240. * This function returns -%ERESTARTSYS if it is interrupted by a signal.
  241. * If @timeout == 0 then this function returns 0 on vsync. If @timeout > 0 then
  242. * this function returns the number of remaining jiffies or -%ETIMEDOUT on
  243. * timeout.
  244. */
  245. int adf_vsync_wait(struct adf_interface *intf, long timeout)
  246. {
  247. ktime_t timestamp;
  248. int ret;
  249. unsigned long flags;
  250. read_lock_irqsave(&intf->vsync_lock, flags);
  251. timestamp = intf->vsync_timestamp;
  252. read_unlock_irqrestore(&intf->vsync_lock, flags);
  253. adf_vsync_get(intf);
  254. if (timeout) {
  255. ret = wait_event_interruptible_timeout(intf->vsync_wait,
  256. !ktime_equal(timestamp,
  257. intf->vsync_timestamp),
  258. msecs_to_jiffies(timeout));
  259. if (ret == 0 && ktime_equal(timestamp, intf->vsync_timestamp))
  260. ret = -ETIMEDOUT;
  261. } else {
  262. ret = wait_event_interruptible(intf->vsync_wait,
  263. !ktime_equal(timestamp,
  264. intf->vsync_timestamp));
  265. }
  266. adf_vsync_put(intf);
  267. return ret;
  268. }
  269. EXPORT_SYMBOL(adf_vsync_wait);
  270. static void adf_event_queue(struct adf_obj *obj, struct adf_event *event)
  271. {
  272. struct adf_file *file;
  273. unsigned long flags;
  274. trace_adf_event(obj, event->type);
  275. spin_lock_irqsave(&obj->file_lock, flags);
  276. list_for_each_entry(file, &obj->file_list, head)
  277. if (test_bit(event->type, file->event_subscriptions))
  278. adf_file_queue_event(file, event);
  279. spin_unlock_irqrestore(&obj->file_lock, flags);
  280. }
  281. /**
  282. * adf_event_notify - notify userspace of a driver-private event
  283. *
  284. * @obj: the ADF object that produced the event
  285. * @event: the event
  286. *
  287. * adf_event_notify() may be called safely from an atomic context. It will
  288. * copy @event if needed, so @event may point to a variable on the stack.
  289. *
  290. * Drivers must NOT call adf_event_notify() for vsync and hotplug events.
  291. * ADF provides adf_vsync_notify() and
  292. * adf_hotplug_notify_{connected,disconnected}() for these events.
  293. */
  294. int adf_event_notify(struct adf_obj *obj, struct adf_event *event)
  295. {
  296. if (WARN_ON(event->type == ADF_EVENT_VSYNC ||
  297. event->type == ADF_EVENT_HOTPLUG))
  298. return -EINVAL;
  299. adf_event_queue(obj, event);
  300. return 0;
  301. }
  302. EXPORT_SYMBOL(adf_event_notify);
  303. /**
  304. * adf_vsync_notify - notify ADF of a display interface's vsync event
  305. *
  306. * @intf: the display interface
  307. * @timestamp: the time the vsync occurred
  308. *
  309. * adf_vsync_notify() may be called safely from an atomic context.
  310. */
  311. void adf_vsync_notify(struct adf_interface *intf, ktime_t timestamp)
  312. {
  313. unsigned long flags;
  314. struct adf_vsync_event event;
  315. write_lock_irqsave(&intf->vsync_lock, flags);
  316. intf->vsync_timestamp = timestamp;
  317. write_unlock_irqrestore(&intf->vsync_lock, flags);
  318. wake_up_interruptible_all(&intf->vsync_wait);
  319. event.base.type = ADF_EVENT_VSYNC;
  320. event.base.length = sizeof(event);
  321. event.timestamp = ktime_to_ns(timestamp);
  322. adf_event_queue(&intf->base, &event.base);
  323. }
  324. EXPORT_SYMBOL(adf_vsync_notify);
  325. void adf_hotplug_notify(struct adf_interface *intf, bool connected,
  326. struct drm_mode_modeinfo *modelist, size_t n_modes)
  327. {
  328. unsigned long flags;
  329. struct adf_hotplug_event event;
  330. struct drm_mode_modeinfo *old_modelist;
  331. write_lock_irqsave(&intf->hotplug_modelist_lock, flags);
  332. old_modelist = intf->modelist;
  333. intf->hotplug_detect = connected;
  334. intf->modelist = modelist;
  335. intf->n_modes = n_modes;
  336. write_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
  337. kfree(old_modelist);
  338. event.base.length = sizeof(event);
  339. event.base.type = ADF_EVENT_HOTPLUG;
  340. event.connected = connected;
  341. adf_event_queue(&intf->base, &event.base);
  342. }
  343. /**
  344. * adf_hotplug_notify_connected - notify ADF of a display interface being
  345. * connected to a display
  346. *
  347. * @intf: the display interface
  348. * @modelist: hardware modes supported by display
  349. * @n_modes: length of modelist
  350. *
  351. * @modelist is copied as needed, so it may point to a variable on the stack.
  352. *
  353. * adf_hotplug_notify_connected() may NOT be called safely from an atomic
  354. * context.
  355. *
  356. * Returns 0 on success or error code (<0) on error.
  357. */
  358. int adf_hotplug_notify_connected(struct adf_interface *intf,
  359. struct drm_mode_modeinfo *modelist, size_t n_modes)
  360. {
  361. struct drm_mode_modeinfo *modelist_copy;
  362. if (n_modes > ADF_MAX_MODES)
  363. return -ENOMEM;
  364. modelist_copy = kzalloc(sizeof(modelist_copy[0]) * n_modes,
  365. GFP_KERNEL);
  366. if (!modelist_copy)
  367. return -ENOMEM;
  368. memcpy(modelist_copy, modelist, sizeof(modelist_copy[0]) * n_modes);
  369. adf_hotplug_notify(intf, true, modelist_copy, n_modes);
  370. return 0;
  371. }
  372. EXPORT_SYMBOL(adf_hotplug_notify_connected);
  373. /**
  374. * adf_hotplug_notify_disconnected - notify ADF of a display interface being
  375. * disconnected from a display
  376. *
  377. * @intf: the display interface
  378. *
  379. * adf_hotplug_notify_disconnected() may be called safely from an atomic
  380. * context.
  381. */
  382. void adf_hotplug_notify_disconnected(struct adf_interface *intf)
  383. {
  384. adf_hotplug_notify(intf, false, NULL, 0);
  385. }
  386. EXPORT_SYMBOL(adf_hotplug_notify_disconnected);
  387. static int adf_obj_init(struct adf_obj *obj, enum adf_obj_type type,
  388. struct idr *idr, struct adf_device *parent,
  389. const struct adf_obj_ops *ops, const char *fmt, va_list args)
  390. {
  391. int ret;
  392. if (ops && ops->supports_event && !ops->set_event) {
  393. pr_err("%s: %s implements supports_event but not set_event\n",
  394. __func__, adf_obj_type_str(type));
  395. return -EINVAL;
  396. }
  397. ret = idr_alloc(idr, obj, 0, 0, GFP_KERNEL);
  398. if (ret < 0) {
  399. pr_err("%s: allocating object id failed: %d\n", __func__, ret);
  400. return ret;
  401. }
  402. obj->id = ret;
  403. vscnprintf(obj->name, sizeof(obj->name), fmt, args);
  404. obj->type = type;
  405. obj->ops = ops;
  406. obj->parent = parent;
  407. mutex_init(&obj->event_lock);
  408. obj->event_refcount = RB_ROOT;
  409. spin_lock_init(&obj->file_lock);
  410. INIT_LIST_HEAD(&obj->file_list);
  411. return 0;
  412. }
  413. static void adf_obj_destroy(struct adf_obj *obj, struct idr *idr)
  414. {
  415. struct rb_node *node = rb_first(&obj->event_refcount);
  416. while (node) {
  417. struct adf_event_refcount *refcount =
  418. container_of(node, struct adf_event_refcount,
  419. node);
  420. rb_erase(&refcount->node, &obj->event_refcount);
  421. kfree(refcount);
  422. node = rb_first(&obj->event_refcount);
  423. }
  424. mutex_destroy(&obj->event_lock);
  425. idr_remove(idr, obj->id);
  426. }
  427. /**
  428. * adf_device_init - initialize ADF-internal data for a display device
  429. * and create sysfs entries
  430. *
  431. * @dev: the display device
  432. * @parent: the device's parent device
  433. * @ops: the device's associated ops
  434. * @fmt: formatting string for the display device's name
  435. *
  436. * @fmt specifies the device's sysfs filename and the name returned to
  437. * userspace through the %ADF_GET_DEVICE_DATA ioctl.
  438. *
  439. * Returns 0 on success or error code (<0) on failure.
  440. */
  441. int adf_device_init(struct adf_device *dev, struct device *parent,
  442. const struct adf_device_ops *ops, const char *fmt, ...)
  443. {
  444. int ret;
  445. va_list args;
  446. if (!ops->validate || !ops->post) {
  447. pr_err("%s: device must implement validate and post\n",
  448. __func__);
  449. return -EINVAL;
  450. }
  451. if (!ops->complete_fence && !ops->advance_timeline) {
  452. if (!IS_ENABLED(CONFIG_SW_SYNC)) {
  453. pr_err("%s: device requires sw_sync but it is not enabled in the kernel\n",
  454. __func__);
  455. return -EINVAL;
  456. }
  457. } else if (!(ops->complete_fence && ops->advance_timeline)) {
  458. pr_err("%s: device must implement both complete_fence and advance_timeline, or implement neither\n",
  459. __func__);
  460. return -EINVAL;
  461. }
  462. memset(dev, 0, sizeof(*dev));
  463. va_start(args, fmt);
  464. ret = adf_obj_init(&dev->base, ADF_OBJ_DEVICE, &adf_devices, dev,
  465. &ops->base, fmt, args);
  466. va_end(args);
  467. if (ret < 0)
  468. return ret;
  469. dev->dev = parent;
  470. dev->ops = ops;
  471. idr_init(&dev->overlay_engines);
  472. idr_init(&dev->interfaces);
  473. mutex_init(&dev->client_lock);
  474. INIT_LIST_HEAD(&dev->post_list);
  475. mutex_init(&dev->post_lock);
  476. init_kthread_worker(&dev->post_worker);
  477. INIT_LIST_HEAD(&dev->attached);
  478. INIT_LIST_HEAD(&dev->attach_allowed);
  479. dev->post_thread = kthread_run(kthread_worker_fn,
  480. &dev->post_worker, dev->base.name);
  481. if (IS_ERR(dev->post_thread)) {
  482. ret = PTR_ERR(dev->post_thread);
  483. dev->post_thread = NULL;
  484. pr_err("%s: failed to run config posting thread: %d\n",
  485. __func__, ret);
  486. goto err;
  487. }
  488. init_kthread_work(&dev->post_work, adf_post_work_func);
  489. ret = adf_device_sysfs_init(dev);
  490. if (ret < 0)
  491. goto err;
  492. return 0;
  493. err:
  494. adf_device_destroy(dev);
  495. return ret;
  496. }
  497. EXPORT_SYMBOL(adf_device_init);
  498. /**
  499. * adf_device_destroy - clean up ADF-internal data for a display device
  500. *
  501. * @dev: the display device
  502. */
  503. void adf_device_destroy(struct adf_device *dev)
  504. {
  505. struct adf_attachment_list *entry, *next;
  506. idr_destroy(&dev->interfaces);
  507. idr_destroy(&dev->overlay_engines);
  508. if (dev->post_thread) {
  509. flush_kthread_worker(&dev->post_worker);
  510. kthread_stop(dev->post_thread);
  511. }
  512. if (dev->onscreen)
  513. adf_post_cleanup(dev, dev->onscreen);
  514. adf_device_sysfs_destroy(dev);
  515. list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
  516. adf_attachment_free(entry);
  517. }
  518. list_for_each_entry_safe(entry, next, &dev->attached, head) {
  519. adf_attachment_free(entry);
  520. }
  521. mutex_destroy(&dev->post_lock);
  522. mutex_destroy(&dev->client_lock);
  523. if (dev->timeline)
  524. sync_timeline_destroy(&dev->timeline->obj);
  525. adf_obj_destroy(&dev->base, &adf_devices);
  526. }
  527. EXPORT_SYMBOL(adf_device_destroy);
  528. /**
  529. * adf_interface_init - initialize ADF-internal data for a display interface
  530. * and create sysfs entries
  531. *
  532. * @intf: the display interface
  533. * @dev: the interface's "parent" display device
  534. * @type: interface type (see enum @adf_interface_type)
  535. * @idx: which interface of type @type;
  536. * e.g. interface DSI.1 -> @type=%ADF_INTF_TYPE_DSI, @idx=1
  537. * @flags: informational flags (bitmask of %ADF_INTF_FLAG_* values)
  538. * @ops: the interface's associated ops
  539. * @fmt: formatting string for the display interface's name
  540. *
  541. * @dev must have previously been initialized with adf_device_init().
  542. *
  543. * @fmt affects the name returned to userspace through the
  544. * %ADF_GET_INTERFACE_DATA ioctl. It does not affect the sysfs filename,
  545. * which is derived from @dev's name.
  546. *
  547. * Returns 0 on success or error code (<0) on failure.
  548. */
  549. int adf_interface_init(struct adf_interface *intf, struct adf_device *dev,
  550. enum adf_interface_type type, u32 idx, u32 flags,
  551. const struct adf_interface_ops *ops, const char *fmt, ...)
  552. {
  553. int ret;
  554. va_list args;
  555. const u32 allowed_flags = ADF_INTF_FLAG_PRIMARY |
  556. ADF_INTF_FLAG_EXTERNAL;
  557. if (dev->n_interfaces == ADF_MAX_INTERFACES) {
  558. pr_err("%s: parent device %s has too many interfaces\n",
  559. __func__, dev->base.name);
  560. return -ENOMEM;
  561. }
  562. if (type >= ADF_INTF_MEMORY && type <= ADF_INTF_TYPE_DEVICE_CUSTOM) {
  563. pr_err("%s: invalid interface type %u\n", __func__, type);
  564. return -EINVAL;
  565. }
  566. if (flags & ~allowed_flags) {
  567. pr_err("%s: invalid interface flags 0x%X\n", __func__,
  568. flags & ~allowed_flags);
  569. return -EINVAL;
  570. }
  571. memset(intf, 0, sizeof(*intf));
  572. va_start(args, fmt);
  573. ret = adf_obj_init(&intf->base, ADF_OBJ_INTERFACE, &dev->interfaces,
  574. dev, ops ? &ops->base : NULL, fmt, args);
  575. va_end(args);
  576. if (ret < 0)
  577. return ret;
  578. intf->type = type;
  579. intf->idx = idx;
  580. intf->flags = flags;
  581. intf->ops = ops;
  582. intf->dpms_state = DRM_MODE_DPMS_OFF;
  583. init_waitqueue_head(&intf->vsync_wait);
  584. rwlock_init(&intf->vsync_lock);
  585. rwlock_init(&intf->hotplug_modelist_lock);
  586. ret = adf_interface_sysfs_init(intf);
  587. if (ret < 0)
  588. goto err;
  589. dev->n_interfaces++;
  590. return 0;
  591. err:
  592. adf_obj_destroy(&intf->base, &dev->interfaces);
  593. return ret;
  594. }
  595. EXPORT_SYMBOL(adf_interface_init);
  596. /**
  597. * adf_interface_destroy - clean up ADF-internal data for a display interface
  598. *
  599. * @intf: the display interface
  600. */
  601. void adf_interface_destroy(struct adf_interface *intf)
  602. {
  603. struct adf_device *dev = adf_interface_parent(intf);
  604. struct adf_attachment_list *entry, *next;
  605. mutex_lock(&dev->client_lock);
  606. list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
  607. if (entry->attachment.interface == intf) {
  608. adf_attachment_free(entry);
  609. dev->n_attach_allowed--;
  610. }
  611. }
  612. list_for_each_entry_safe(entry, next, &dev->attached, head) {
  613. if (entry->attachment.interface == intf) {
  614. adf_device_detach_op(dev,
  615. entry->attachment.overlay_engine, intf);
  616. adf_attachment_free(entry);
  617. dev->n_attached--;
  618. }
  619. }
  620. kfree(intf->modelist);
  621. adf_interface_sysfs_destroy(intf);
  622. adf_obj_destroy(&intf->base, &dev->interfaces);
  623. dev->n_interfaces--;
  624. mutex_unlock(&dev->client_lock);
  625. }
  626. EXPORT_SYMBOL(adf_interface_destroy);
  627. static bool adf_overlay_engine_has_custom_formats(
  628. const struct adf_overlay_engine_ops *ops)
  629. {
  630. size_t i;
  631. for (i = 0; i < ops->n_supported_formats; i++)
  632. if (!adf_format_is_standard(ops->supported_formats[i]))
  633. return true;
  634. return false;
  635. }
  636. /**
  637. * adf_overlay_engine_init - initialize ADF-internal data for an
  638. * overlay engine and create sysfs entries
  639. *
  640. * @eng: the overlay engine
  641. * @dev: the overlay engine's "parent" display device
  642. * @ops: the overlay engine's associated ops
  643. * @fmt: formatting string for the overlay engine's name
  644. *
  645. * @dev must have previously been initialized with adf_device_init().
  646. *
  647. * @fmt affects the name returned to userspace through the
  648. * %ADF_GET_OVERLAY_ENGINE_DATA ioctl. It does not affect the sysfs filename,
  649. * which is derived from @dev's name.
  650. *
  651. * Returns 0 on success or error code (<0) on failure.
  652. */
  653. int adf_overlay_engine_init(struct adf_overlay_engine *eng,
  654. struct adf_device *dev,
  655. const struct adf_overlay_engine_ops *ops, const char *fmt, ...)
  656. {
  657. int ret;
  658. va_list args;
  659. if (!ops->supported_formats) {
  660. pr_err("%s: overlay engine must support at least one format\n",
  661. __func__);
  662. return -EINVAL;
  663. }
  664. if (ops->n_supported_formats > ADF_MAX_SUPPORTED_FORMATS) {
  665. pr_err("%s: overlay engine supports too many formats\n",
  666. __func__);
  667. return -EINVAL;
  668. }
  669. if (adf_overlay_engine_has_custom_formats(ops) &&
  670. !dev->ops->validate_custom_format) {
  671. pr_err("%s: overlay engine has custom formats but parent device %s does not implement validate_custom_format\n",
  672. __func__, dev->base.name);
  673. return -EINVAL;
  674. }
  675. memset(eng, 0, sizeof(*eng));
  676. va_start(args, fmt);
  677. ret = adf_obj_init(&eng->base, ADF_OBJ_OVERLAY_ENGINE,
  678. &dev->overlay_engines, dev, &ops->base, fmt, args);
  679. va_end(args);
  680. if (ret < 0)
  681. return ret;
  682. eng->ops = ops;
  683. ret = adf_overlay_engine_sysfs_init(eng);
  684. if (ret < 0)
  685. goto err;
  686. return 0;
  687. err:
  688. adf_obj_destroy(&eng->base, &dev->overlay_engines);
  689. return ret;
  690. }
  691. EXPORT_SYMBOL(adf_overlay_engine_init);
  692. /**
  693. * adf_interface_destroy - clean up ADF-internal data for an overlay engine
  694. *
  695. * @eng: the overlay engine
  696. */
  697. void adf_overlay_engine_destroy(struct adf_overlay_engine *eng)
  698. {
  699. struct adf_device *dev = adf_overlay_engine_parent(eng);
  700. struct adf_attachment_list *entry, *next;
  701. mutex_lock(&dev->client_lock);
  702. list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
  703. if (entry->attachment.overlay_engine == eng) {
  704. adf_attachment_free(entry);
  705. dev->n_attach_allowed--;
  706. }
  707. }
  708. list_for_each_entry_safe(entry, next, &dev->attached, head) {
  709. if (entry->attachment.overlay_engine == eng) {
  710. adf_device_detach_op(dev, eng,
  711. entry->attachment.interface);
  712. adf_attachment_free(entry);
  713. dev->n_attached--;
  714. }
  715. }
  716. adf_overlay_engine_sysfs_destroy(eng);
  717. adf_obj_destroy(&eng->base, &dev->overlay_engines);
  718. mutex_unlock(&dev->client_lock);
  719. }
  720. EXPORT_SYMBOL(adf_overlay_engine_destroy);
  721. struct adf_attachment_list *adf_attachment_find(struct list_head *list,
  722. struct adf_overlay_engine *eng, struct adf_interface *intf)
  723. {
  724. struct adf_attachment_list *entry;
  725. list_for_each_entry(entry, list, head) {
  726. if (entry->attachment.interface == intf &&
  727. entry->attachment.overlay_engine == eng)
  728. return entry;
  729. }
  730. return NULL;
  731. }
  732. int adf_attachment_validate(struct adf_device *dev,
  733. struct adf_overlay_engine *eng, struct adf_interface *intf)
  734. {
  735. struct adf_device *intf_dev = adf_interface_parent(intf);
  736. struct adf_device *eng_dev = adf_overlay_engine_parent(eng);
  737. if (intf_dev != dev) {
  738. dev_err(&dev->base.dev, "can't attach interface %s belonging to device %s\n",
  739. intf->base.name, intf_dev->base.name);
  740. return -EINVAL;
  741. }
  742. if (eng_dev != dev) {
  743. dev_err(&dev->base.dev, "can't attach overlay engine %s belonging to device %s\n",
  744. eng->base.name, eng_dev->base.name);
  745. return -EINVAL;
  746. }
  747. return 0;
  748. }
  749. /**
  750. * adf_attachment_allow - add a new entry to the list of allowed
  751. * attachments
  752. *
  753. * @dev: the parent device
  754. * @eng: the overlay engine
  755. * @intf: the interface
  756. *
  757. * adf_attachment_allow() indicates that the underlying display hardware allows
  758. * @intf to scan out @eng's output. It is intended to be called at
  759. * driver initialization for each supported overlay engine + interface pair.
  760. *
  761. * Returns 0 on success, -%EALREADY if the entry already exists, or -errno on
  762. * any other failure.
  763. */
  764. int adf_attachment_allow(struct adf_device *dev,
  765. struct adf_overlay_engine *eng, struct adf_interface *intf)
  766. {
  767. int ret;
  768. struct adf_attachment_list *entry = NULL;
  769. ret = adf_attachment_validate(dev, eng, intf);
  770. if (ret < 0)
  771. return ret;
  772. mutex_lock(&dev->client_lock);
  773. if (dev->n_attach_allowed == ADF_MAX_ATTACHMENTS) {
  774. ret = -ENOMEM;
  775. goto done;
  776. }
  777. if (adf_attachment_find(&dev->attach_allowed, eng, intf)) {
  778. ret = -EALREADY;
  779. goto done;
  780. }
  781. entry = kzalloc(sizeof(*entry), GFP_KERNEL);
  782. if (!entry) {
  783. ret = -ENOMEM;
  784. goto done;
  785. }
  786. entry->attachment.interface = intf;
  787. entry->attachment.overlay_engine = eng;
  788. list_add_tail(&entry->head, &dev->attach_allowed);
  789. dev->n_attach_allowed++;
  790. done:
  791. mutex_unlock(&dev->client_lock);
  792. if (ret < 0)
  793. kfree(entry);
  794. return ret;
  795. }
  796. EXPORT_SYMBOL(adf_attachment_allow);
  797. /**
  798. * adf_obj_type_str - string representation of an adf_obj_type
  799. *
  800. * @type: the object type
  801. */
  802. const char *adf_obj_type_str(enum adf_obj_type type)
  803. {
  804. switch (type) {
  805. case ADF_OBJ_OVERLAY_ENGINE:
  806. return "overlay engine";
  807. case ADF_OBJ_INTERFACE:
  808. return "interface";
  809. case ADF_OBJ_DEVICE:
  810. return "device";
  811. default:
  812. return "unknown";
  813. }
  814. }
  815. EXPORT_SYMBOL(adf_obj_type_str);
  816. /**
  817. * adf_interface_type_str - string representation of an adf_interface's type
  818. *
  819. * @intf: the interface
  820. */
  821. const char *adf_interface_type_str(struct adf_interface *intf)
  822. {
  823. switch (intf->type) {
  824. case ADF_INTF_DSI:
  825. return "DSI";
  826. case ADF_INTF_eDP:
  827. return "eDP";
  828. case ADF_INTF_DPI:
  829. return "DPI";
  830. case ADF_INTF_VGA:
  831. return "VGA";
  832. case ADF_INTF_DVI:
  833. return "DVI";
  834. case ADF_INTF_HDMI:
  835. return "HDMI";
  836. case ADF_INTF_MEMORY:
  837. return "memory";
  838. default:
  839. if (intf->type >= ADF_INTF_TYPE_DEVICE_CUSTOM) {
  840. if (intf->ops && intf->ops->type_str)
  841. return intf->ops->type_str(intf);
  842. return "custom";
  843. }
  844. return "unknown";
  845. }
  846. }
  847. EXPORT_SYMBOL(adf_interface_type_str);
  848. /**
  849. * adf_event_type_str - string representation of an adf_event_type
  850. *
  851. * @obj: ADF object that produced the event
  852. * @type: event type
  853. */
  854. const char *adf_event_type_str(struct adf_obj *obj, enum adf_event_type type)
  855. {
  856. switch (type) {
  857. case ADF_EVENT_VSYNC:
  858. return "vsync";
  859. case ADF_EVENT_HOTPLUG:
  860. return "hotplug";
  861. default:
  862. if (type >= ADF_EVENT_DEVICE_CUSTOM) {
  863. if (obj->ops && obj->ops->event_type_str)
  864. return obj->ops->event_type_str(obj, type);
  865. return "custom";
  866. }
  867. return "unknown";
  868. }
  869. }
  870. EXPORT_SYMBOL(adf_event_type_str);
  871. /**
  872. * adf_format_str - string representation of an ADF/DRM fourcc format
  873. *
  874. * @format: format fourcc
  875. * @buf: target buffer for the format's string representation
  876. */
  877. void adf_format_str(u32 format, char buf[ADF_FORMAT_STR_SIZE])
  878. {
  879. buf[0] = format & 0xFF;
  880. buf[1] = (format >> 8) & 0xFF;
  881. buf[2] = (format >> 16) & 0xFF;
  882. buf[3] = (format >> 24) & 0xFF;
  883. buf[4] = '\0';
  884. }
  885. EXPORT_SYMBOL(adf_format_str);
  886. /**
  887. * adf_format_validate_yuv - validate the number and size of planes in buffers
  888. * with a custom YUV format.
  889. *
  890. * @dev: ADF device performing the validation
  891. * @buf: buffer to validate
  892. * @num_planes: expected number of planes
  893. * @hsub: expected horizontal chroma subsampling factor, in pixels
  894. * @vsub: expected vertical chroma subsampling factor, in pixels
  895. * @cpp: expected bytes per pixel for each plane (length @num_planes)
  896. *
  897. * adf_format_validate_yuv() is intended to be called as a helper from @dev's
  898. * validate_custom_format() op.
  899. *
  900. * Returns 0 if @buf has the expected number of planes and each plane
  901. * has sufficient size, or -EINVAL otherwise.
  902. */
  903. int adf_format_validate_yuv(struct adf_device *dev, struct adf_buffer *buf,
  904. u8 num_planes, u8 hsub, u8 vsub, u8 cpp[])
  905. {
  906. u8 i;
  907. if (num_planes != buf->n_planes) {
  908. char format_str[ADF_FORMAT_STR_SIZE];
  909. adf_format_str(buf->format, format_str);
  910. dev_err(&dev->base.dev, "%u planes expected for format %s but %u planes provided\n",
  911. num_planes, format_str, buf->n_planes);
  912. return -EINVAL;
  913. }
  914. if (buf->w == 0 || buf->w % hsub) {
  915. dev_err(&dev->base.dev, "bad buffer width %u\n", buf->w);
  916. return -EINVAL;
  917. }
  918. if (buf->h == 0 || buf->h % vsub) {
  919. dev_err(&dev->base.dev, "bad buffer height %u\n", buf->h);
  920. return -EINVAL;
  921. }
  922. for (i = 0; i < num_planes; i++) {
  923. u32 width = buf->w / (i != 0 ? hsub : 1);
  924. u32 height = buf->h / (i != 0 ? vsub : 1);
  925. u8 cpp = adf_format_plane_cpp(buf->format, i);
  926. u32 last_line_size;
  927. if (buf->pitch[i] < (u64) width * cpp) {
  928. dev_err(&dev->base.dev, "plane %u pitch is shorter than buffer width (pitch = %u, width = %u, bpp = %u)\n",
  929. i, buf->pitch[i], width, cpp * 8);
  930. return -EINVAL;
  931. }
  932. switch (dev->ops->quirks.buffer_padding) {
  933. case ADF_BUFFER_PADDED_TO_PITCH:
  934. last_line_size = buf->pitch[i];
  935. break;
  936. case ADF_BUFFER_UNPADDED:
  937. last_line_size = width * cpp;
  938. break;
  939. default:
  940. BUG();
  941. }
  942. if ((u64) (height - 1) * buf->pitch[i] + last_line_size +
  943. buf->offset[i] > buf->dma_bufs[i]->size) {
  944. dev_err(&dev->base.dev, "plane %u buffer too small (height = %u, pitch = %u, offset = %u, size = %zu)\n",
  945. i, height, buf->pitch[i],
  946. buf->offset[i], buf->dma_bufs[i]->size);
  947. return -EINVAL;
  948. }
  949. }
  950. return 0;
  951. }
  952. EXPORT_SYMBOL(adf_format_validate_yuv);
  953. /**
  954. * adf_modeinfo_set_name - sets the name of a mode from its display resolution
  955. *
  956. * @mode: mode
  957. *
  958. * adf_modeinfo_set_name() fills in @mode->name in the format
  959. * "[hdisplay]x[vdisplay](i)". It is intended to help drivers create
  960. * ADF/DRM-style modelists from other mode formats.
  961. */
  962. void adf_modeinfo_set_name(struct drm_mode_modeinfo *mode)
  963. {
  964. bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
  965. snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
  966. mode->hdisplay, mode->vdisplay,
  967. interlaced ? "i" : "");
  968. }
  969. EXPORT_SYMBOL(adf_modeinfo_set_name);
  970. /**
  971. * adf_modeinfo_set_vrefresh - sets the vrefresh of a mode from its other
  972. * timing data
  973. *
  974. * @mode: mode
  975. *
  976. * adf_modeinfo_set_vrefresh() calculates @mode->vrefresh from
  977. * @mode->{h,v}display and @mode->flags. It is intended to help drivers
  978. * create ADF/DRM-style modelists from other mode formats.
  979. */
  980. void adf_modeinfo_set_vrefresh(struct drm_mode_modeinfo *mode)
  981. {
  982. int refresh = 0;
  983. unsigned int calc_val;
  984. if (mode->vrefresh > 0)
  985. return;
  986. if (mode->htotal <= 0 || mode->vtotal <= 0)
  987. return;
  988. /* work out vrefresh the value will be x1000 */
  989. calc_val = (mode->clock * 1000);
  990. calc_val /= mode->htotal;
  991. refresh = (calc_val + mode->vtotal / 2) / mode->vtotal;
  992. if (mode->flags & DRM_MODE_FLAG_INTERLACE)
  993. refresh *= 2;
  994. if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
  995. refresh /= 2;
  996. if (mode->vscan > 1)
  997. refresh /= mode->vscan;
  998. mode->vrefresh = refresh;
  999. }
  1000. EXPORT_SYMBOL(adf_modeinfo_set_vrefresh);
  1001. static int __init adf_init(void)
  1002. {
  1003. int err;
  1004. err = adf_sysfs_init();
  1005. if (err < 0)
  1006. return err;
  1007. return 0;
  1008. }
/* Module exit: tear down the sysfs state created by adf_init(). */
static void __exit adf_exit(void)
{
adf_sysfs_destroy();
}
/* Register the module entry/exit points. */
module_init(adf_init);
module_exit(adf_exit);