/*
 * SPI bus via the Blackfin SPORT peripheral
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Copyright 2009-2011 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
  10. #include <linux/module.h>
  11. #include <linux/delay.h>
  12. #include <linux/device.h>
  13. #include <linux/gpio.h>
  14. #include <linux/io.h>
  15. #include <linux/ioport.h>
  16. #include <linux/irq.h>
  17. #include <linux/errno.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/platform_device.h>
  20. #include <linux/spi/spi.h>
  21. #include <linux/workqueue.h>
  22. #include <asm/portmux.h>
  23. #include <asm/bfin5xx_spi.h>
  24. #include <asm/blackfin.h>
  25. #include <asm/bfin_sport.h>
  26. #include <asm/cacheflush.h>
  27. #define DRV_NAME "bfin-sport-spi"
  28. #define DRV_DESC "SPI bus via the Blackfin SPORT"
  29. MODULE_AUTHOR("Cliff Cai");
  30. MODULE_DESCRIPTION(DRV_DESC);
  31. MODULE_LICENSE("GPL");
  32. MODULE_ALIAS("platform:bfin-sport-spi");
  33. enum bfin_sport_spi_state {
  34. START_STATE,
  35. RUNNING_STATE,
  36. DONE_STATE,
  37. ERROR_STATE,
  38. };
  39. struct bfin_sport_spi_master_data;
  40. struct bfin_sport_transfer_ops {
  41. void (*write) (struct bfin_sport_spi_master_data *);
  42. void (*read) (struct bfin_sport_spi_master_data *);
  43. void (*duplex) (struct bfin_sport_spi_master_data *);
  44. };
  45. struct bfin_sport_spi_master_data {
  46. /* Driver model hookup */
  47. struct device *dev;
  48. /* SPI framework hookup */
  49. struct spi_master *master;
  50. /* Regs base of SPI controller */
  51. struct sport_register __iomem *regs;
  52. int err_irq;
  53. /* Pin request list */
  54. u16 *pin_req;
  55. struct work_struct pump_messages;
  56. spinlock_t lock;
  57. struct list_head queue;
  58. int busy;
  59. bool run;
  60. /* Message Transfer pump */
  61. struct tasklet_struct pump_transfers;
  62. /* Current message transfer state info */
  63. enum bfin_sport_spi_state state;
  64. struct spi_message *cur_msg;
  65. struct spi_transfer *cur_transfer;
  66. struct bfin_sport_spi_slave_data *cur_chip;
  67. union {
  68. void *tx;
  69. u8 *tx8;
  70. u16 *tx16;
  71. };
  72. void *tx_end;
  73. union {
  74. void *rx;
  75. u8 *rx8;
  76. u16 *rx16;
  77. };
  78. void *rx_end;
  79. int cs_change;
  80. struct bfin_sport_transfer_ops *ops;
  81. };
  82. struct bfin_sport_spi_slave_data {
  83. u16 ctl_reg;
  84. u16 baud;
  85. u16 cs_chg_udelay; /* Some devices require > 255usec delay */
  86. u32 cs_gpio;
  87. u16 idle_tx_val;
  88. struct bfin_sport_transfer_ops *ops;
  89. };
  90. static void
  91. bfin_sport_spi_enable(struct bfin_sport_spi_master_data *drv_data)
  92. {
  93. bfin_write_or(&drv_data->regs->tcr1, TSPEN);
  94. bfin_write_or(&drv_data->regs->rcr1, TSPEN);
  95. SSYNC();
  96. }
  97. static void
  98. bfin_sport_spi_disable(struct bfin_sport_spi_master_data *drv_data)
  99. {
  100. bfin_write_and(&drv_data->regs->tcr1, ~TSPEN);
  101. bfin_write_and(&drv_data->regs->rcr1, ~TSPEN);
  102. SSYNC();
  103. }
  104. /* Caculate the SPI_BAUD register value based on input HZ */
  105. static u16
  106. bfin_sport_hz_to_spi_baud(u32 speed_hz)
  107. {
  108. u_long clk, sclk = get_sclk();
  109. int div = (sclk / (2 * speed_hz)) - 1;
  110. if (div < 0)
  111. div = 0;
  112. clk = sclk / (2 * (div + 1));
  113. if (clk > speed_hz)
  114. div++;
  115. return div;
  116. }
  117. /* Chip select operation functions for cs_change flag */
  118. static void
  119. bfin_sport_spi_cs_active(struct bfin_sport_spi_slave_data *chip)
  120. {
  121. gpio_direction_output(chip->cs_gpio, 0);
  122. }
  123. static void
  124. bfin_sport_spi_cs_deactive(struct bfin_sport_spi_slave_data *chip)
  125. {
  126. gpio_direction_output(chip->cs_gpio, 1);
  127. /* Move delay here for consistency */
  128. if (chip->cs_chg_udelay)
  129. udelay(chip->cs_chg_udelay);
  130. }
  131. static void
  132. bfin_sport_spi_stat_poll_complete(struct bfin_sport_spi_master_data *drv_data)
  133. {
  134. unsigned long timeout = jiffies + HZ;
  135. while (!(bfin_read(&drv_data->regs->stat) & RXNE)) {
  136. if (!time_before(jiffies, timeout))
  137. break;
  138. }
  139. }
  140. static void
  141. bfin_sport_spi_u8_writer(struct bfin_sport_spi_master_data *drv_data)
  142. {
  143. u16 dummy;
  144. while (drv_data->tx < drv_data->tx_end) {
  145. bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
  146. bfin_sport_spi_stat_poll_complete(drv_data);
  147. dummy = bfin_read(&drv_data->regs->rx16);
  148. }
  149. }
  150. static void
  151. bfin_sport_spi_u8_reader(struct bfin_sport_spi_master_data *drv_data)
  152. {
  153. u16 tx_val = drv_data->cur_chip->idle_tx_val;
  154. while (drv_data->rx < drv_data->rx_end) {
  155. bfin_write(&drv_data->regs->tx16, tx_val);
  156. bfin_sport_spi_stat_poll_complete(drv_data);
  157. *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
  158. }
  159. }
  160. static void
  161. bfin_sport_spi_u8_duplex(struct bfin_sport_spi_master_data *drv_data)
  162. {
  163. while (drv_data->rx < drv_data->rx_end) {
  164. bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
  165. bfin_sport_spi_stat_poll_complete(drv_data);
  166. *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
  167. }
  168. }
  169. static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u8 = {
  170. .write = bfin_sport_spi_u8_writer,
  171. .read = bfin_sport_spi_u8_reader,
  172. .duplex = bfin_sport_spi_u8_duplex,
  173. };
  174. static void
  175. bfin_sport_spi_u16_writer(struct bfin_sport_spi_master_data *drv_data)
  176. {
  177. u16 dummy;
  178. while (drv_data->tx < drv_data->tx_end) {
  179. bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
  180. bfin_sport_spi_stat_poll_complete(drv_data);
  181. dummy = bfin_read(&drv_data->regs->rx16);
  182. }
  183. }
  184. static void
  185. bfin_sport_spi_u16_reader(struct bfin_sport_spi_master_data *drv_data)
  186. {
  187. u16 tx_val = drv_data->cur_chip->idle_tx_val;
  188. while (drv_data->rx < drv_data->rx_end) {
  189. bfin_write(&drv_data->regs->tx16, tx_val);
  190. bfin_sport_spi_stat_poll_complete(drv_data);
  191. *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
  192. }
  193. }
  194. static void
  195. bfin_sport_spi_u16_duplex(struct bfin_sport_spi_master_data *drv_data)
  196. {
  197. while (drv_data->rx < drv_data->rx_end) {
  198. bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
  199. bfin_sport_spi_stat_poll_complete(drv_data);
  200. *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
  201. }
  202. }
  203. static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u16 = {
  204. .write = bfin_sport_spi_u16_writer,
  205. .read = bfin_sport_spi_u16_reader,
  206. .duplex = bfin_sport_spi_u16_duplex,
  207. };
  208. /* stop controller and re-config current chip */
  209. static void
  210. bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data)
  211. {
  212. struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
  213. bfin_sport_spi_disable(drv_data);
  214. dev_dbg(drv_data->dev, "restoring spi ctl state\n");
  215. bfin_write(&drv_data->regs->tcr1, chip->ctl_reg);
  216. bfin_write(&drv_data->regs->tclkdiv, chip->baud);
  217. SSYNC();
  218. bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS));
  219. SSYNC();
  220. bfin_sport_spi_cs_active(chip);
  221. }
  222. /* test if there is more transfer to be done */
  223. static enum bfin_sport_spi_state
  224. bfin_sport_spi_next_transfer(struct bfin_sport_spi_master_data *drv_data)
  225. {
  226. struct spi_message *msg = drv_data->cur_msg;
  227. struct spi_transfer *trans = drv_data->cur_transfer;
  228. /* Move to next transfer */
  229. if (trans->transfer_list.next != &msg->transfers) {
  230. drv_data->cur_transfer =
  231. list_entry(trans->transfer_list.next,
  232. struct spi_transfer, transfer_list);
  233. return RUNNING_STATE;
  234. }
  235. return DONE_STATE;
  236. }
  237. /*
  238. * caller already set message->status;
  239. * dma and pio irqs are blocked give finished message back
  240. */
  241. static void
  242. bfin_sport_spi_giveback(struct bfin_sport_spi_master_data *drv_data)
  243. {
  244. struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
  245. unsigned long flags;
  246. struct spi_message *msg;
  247. spin_lock_irqsave(&drv_data->lock, flags);
  248. msg = drv_data->cur_msg;
  249. drv_data->state = START_STATE;
  250. drv_data->cur_msg = NULL;
  251. drv_data->cur_transfer = NULL;
  252. drv_data->cur_chip = NULL;
  253. schedule_work(&drv_data->pump_messages);
  254. spin_unlock_irqrestore(&drv_data->lock, flags);
  255. if (!drv_data->cs_change)
  256. bfin_sport_spi_cs_deactive(chip);
  257. if (msg->complete)
  258. msg->complete(msg->context);
  259. }
  260. static irqreturn_t
  261. sport_err_handler(int irq, void *dev_id)
  262. {
  263. struct bfin_sport_spi_master_data *drv_data = dev_id;
  264. u16 status;
  265. dev_dbg(drv_data->dev, "%s enter\n", __func__);
  266. status = bfin_read(&drv_data->regs->stat) & (TOVF | TUVF | ROVF | RUVF);
  267. if (status) {
  268. bfin_write(&drv_data->regs->stat, status);
  269. SSYNC();
  270. bfin_sport_spi_disable(drv_data);
  271. dev_err(drv_data->dev, "status error:%s%s%s%s\n",
  272. status & TOVF ? " TOVF" : "",
  273. status & TUVF ? " TUVF" : "",
  274. status & ROVF ? " ROVF" : "",
  275. status & RUVF ? " RUVF" : "");
  276. }
  277. return IRQ_HANDLED;
  278. }
  279. static void
  280. bfin_sport_spi_pump_transfers(unsigned long data)
  281. {
  282. struct bfin_sport_spi_master_data *drv_data = (void *)data;
  283. struct spi_message *message = NULL;
  284. struct spi_transfer *transfer = NULL;
  285. struct spi_transfer *previous = NULL;
  286. struct bfin_sport_spi_slave_data *chip = NULL;
  287. unsigned int bits_per_word;
  288. u32 tranf_success = 1;
  289. u32 transfer_speed;
  290. u8 full_duplex = 0;
  291. /* Get current state information */
  292. message = drv_data->cur_msg;
  293. transfer = drv_data->cur_transfer;
  294. chip = drv_data->cur_chip;
  295. transfer_speed = bfin_sport_hz_to_spi_baud(transfer->speed_hz);
  296. bfin_write(&drv_data->regs->tclkdiv, transfer_speed);
  297. SSYNC();
  298. /*
  299. * if msg is error or done, report it back using complete() callback
  300. */
  301. /* Handle for abort */
  302. if (drv_data->state == ERROR_STATE) {
  303. dev_dbg(drv_data->dev, "transfer: we've hit an error\n");
  304. message->status = -EIO;
  305. bfin_sport_spi_giveback(drv_data);
  306. return;
  307. }
  308. /* Handle end of message */
  309. if (drv_data->state == DONE_STATE) {
  310. dev_dbg(drv_data->dev, "transfer: all done!\n");
  311. message->status = 0;
  312. bfin_sport_spi_giveback(drv_data);
  313. return;
  314. }
  315. /* Delay if requested at end of transfer */
  316. if (drv_data->state == RUNNING_STATE) {
  317. dev_dbg(drv_data->dev, "transfer: still running ...\n");
  318. previous = list_entry(transfer->transfer_list.prev,
  319. struct spi_transfer, transfer_list);
  320. if (previous->delay_usecs)
  321. udelay(previous->delay_usecs);
  322. }
  323. if (transfer->len == 0) {
  324. /* Move to next transfer of this msg */
  325. drv_data->state = bfin_sport_spi_next_transfer(drv_data);
  326. /* Schedule next transfer tasklet */
  327. tasklet_schedule(&drv_data->pump_transfers);
  328. }
  329. if (transfer->tx_buf != NULL) {
  330. drv_data->tx = (void *)transfer->tx_buf;
  331. drv_data->tx_end = drv_data->tx + transfer->len;
  332. dev_dbg(drv_data->dev, "tx_buf is %p, tx_end is %p\n",
  333. transfer->tx_buf, drv_data->tx_end);
  334. } else
  335. drv_data->tx = NULL;
  336. if (transfer->rx_buf != NULL) {
  337. full_duplex = transfer->tx_buf != NULL;
  338. drv_data->rx = transfer->rx_buf;
  339. drv_data->rx_end = drv_data->rx + transfer->len;
  340. dev_dbg(drv_data->dev, "rx_buf is %p, rx_end is %p\n",
  341. transfer->rx_buf, drv_data->rx_end);
  342. } else
  343. drv_data->rx = NULL;
  344. drv_data->cs_change = transfer->cs_change;
  345. /* Bits per word setup */
  346. bits_per_word = transfer->bits_per_word;
  347. if (bits_per_word == 16)
  348. drv_data->ops = &bfin_sport_transfer_ops_u16;
  349. else
  350. drv_data->ops = &bfin_sport_transfer_ops_u8;
  351. bfin_write(&drv_data->regs->tcr2, bits_per_word - 1);
  352. bfin_write(&drv_data->regs->tfsdiv, bits_per_word - 1);
  353. bfin_write(&drv_data->regs->rcr2, bits_per_word - 1);
  354. drv_data->state = RUNNING_STATE;
  355. if (drv_data->cs_change)
  356. bfin_sport_spi_cs_active(chip);
  357. dev_dbg(drv_data->dev,
  358. "now pumping a transfer: width is %d, len is %d\n",
  359. bits_per_word, transfer->len);
  360. /* PIO mode write then read */
  361. dev_dbg(drv_data->dev, "doing IO transfer\n");
  362. bfin_sport_spi_enable(drv_data);
  363. if (full_duplex) {
  364. /* full duplex mode */
  365. BUG_ON((drv_data->tx_end - drv_data->tx) !=
  366. (drv_data->rx_end - drv_data->rx));
  367. drv_data->ops->duplex(drv_data);
  368. if (drv_data->tx != drv_data->tx_end)
  369. tranf_success = 0;
  370. } else if (drv_data->tx != NULL) {
  371. /* write only half duplex */
  372. drv_data->ops->write(drv_data);
  373. if (drv_data->tx != drv_data->tx_end)
  374. tranf_success = 0;
  375. } else if (drv_data->rx != NULL) {
  376. /* read only half duplex */
  377. drv_data->ops->read(drv_data);
  378. if (drv_data->rx != drv_data->rx_end)
  379. tranf_success = 0;
  380. }
  381. bfin_sport_spi_disable(drv_data);
  382. if (!tranf_success) {
  383. dev_dbg(drv_data->dev, "IO write error!\n");
  384. drv_data->state = ERROR_STATE;
  385. } else {
  386. /* Update total byte transferred */
  387. message->actual_length += transfer->len;
  388. /* Move to next transfer of this msg */
  389. drv_data->state = bfin_sport_spi_next_transfer(drv_data);
  390. if (drv_data->cs_change)
  391. bfin_sport_spi_cs_deactive(chip);
  392. }
  393. /* Schedule next transfer tasklet */
  394. tasklet_schedule(&drv_data->pump_transfers);
  395. }
  396. /* pop a msg from queue and kick off real transfer */
  397. static void
  398. bfin_sport_spi_pump_messages(struct work_struct *work)
  399. {
  400. struct bfin_sport_spi_master_data *drv_data;
  401. unsigned long flags;
  402. struct spi_message *next_msg;
  403. drv_data = container_of(work, struct bfin_sport_spi_master_data, pump_messages);
  404. /* Lock queue and check for queue work */
  405. spin_lock_irqsave(&drv_data->lock, flags);
  406. if (list_empty(&drv_data->queue) || !drv_data->run) {
  407. /* pumper kicked off but no work to do */
  408. drv_data->busy = 0;
  409. spin_unlock_irqrestore(&drv_data->lock, flags);
  410. return;
  411. }
  412. /* Make sure we are not already running a message */
  413. if (drv_data->cur_msg) {
  414. spin_unlock_irqrestore(&drv_data->lock, flags);
  415. return;
  416. }
  417. /* Extract head of queue */
  418. next_msg = list_entry(drv_data->queue.next,
  419. struct spi_message, queue);
  420. drv_data->cur_msg = next_msg;
  421. /* Setup the SSP using the per chip configuration */
  422. drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
  423. list_del_init(&drv_data->cur_msg->queue);
  424. /* Initialize message state */
  425. drv_data->cur_msg->state = START_STATE;
  426. drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
  427. struct spi_transfer, transfer_list);
  428. bfin_sport_spi_restore_state(drv_data);
  429. dev_dbg(drv_data->dev, "got a message to pump, "
  430. "state is set to: baud %d, cs_gpio %i, ctl 0x%x\n",
  431. drv_data->cur_chip->baud, drv_data->cur_chip->cs_gpio,
  432. drv_data->cur_chip->ctl_reg);
  433. dev_dbg(drv_data->dev,
  434. "the first transfer len is %d\n",
  435. drv_data->cur_transfer->len);
  436. /* Mark as busy and launch transfers */
  437. tasklet_schedule(&drv_data->pump_transfers);
  438. drv_data->busy = 1;
  439. spin_unlock_irqrestore(&drv_data->lock, flags);
  440. }
  441. /*
  442. * got a msg to transfer, queue it in drv_data->queue.
  443. * And kick off message pumper
  444. */
  445. static int
  446. bfin_sport_spi_transfer(struct spi_device *spi, struct spi_message *msg)
  447. {
  448. struct bfin_sport_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
  449. unsigned long flags;
  450. spin_lock_irqsave(&drv_data->lock, flags);
  451. if (!drv_data->run) {
  452. spin_unlock_irqrestore(&drv_data->lock, flags);
  453. return -ESHUTDOWN;
  454. }
  455. msg->actual_length = 0;
  456. msg->status = -EINPROGRESS;
  457. msg->state = START_STATE;
  458. dev_dbg(&spi->dev, "adding an msg in transfer()\n");
  459. list_add_tail(&msg->queue, &drv_data->queue);
  460. if (drv_data->run && !drv_data->busy)
  461. schedule_work(&drv_data->pump_messages);
  462. spin_unlock_irqrestore(&drv_data->lock, flags);
  463. return 0;
  464. }
  465. /* Called every time common spi devices change state */
  466. static int
  467. bfin_sport_spi_setup(struct spi_device *spi)
  468. {
  469. struct bfin_sport_spi_slave_data *chip, *first = NULL;
  470. int ret;
  471. /* Only alloc (or use chip_info) on first setup */
  472. chip = spi_get_ctldata(spi);
  473. if (chip == NULL) {
  474. struct bfin5xx_spi_chip *chip_info;
  475. chip = first = kzalloc(sizeof(*chip), GFP_KERNEL);
  476. if (!chip)
  477. return -ENOMEM;
  478. /* platform chip_info isn't required */
  479. chip_info = spi->controller_data;
  480. if (chip_info) {
  481. /*
  482. * DITFS and TDTYPE are only thing we don't set, but
  483. * they probably shouldn't be changed by people.
  484. */
  485. if (chip_info->ctl_reg || chip_info->enable_dma) {
  486. ret = -EINVAL;
  487. dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields\n");
  488. goto error;
  489. }
  490. chip->cs_chg_udelay = chip_info->cs_chg_udelay;
  491. chip->idle_tx_val = chip_info->idle_tx_val;
  492. }
  493. }
  494. /* translate common spi framework into our register
  495. * following configure contents are same for tx and rx.
  496. */
  497. if (spi->mode & SPI_CPHA)
  498. chip->ctl_reg &= ~TCKFE;
  499. else
  500. chip->ctl_reg |= TCKFE;
  501. if (spi->mode & SPI_LSB_FIRST)
  502. chip->ctl_reg |= TLSBIT;
  503. else
  504. chip->ctl_reg &= ~TLSBIT;
  505. /* Sport in master mode */
  506. chip->ctl_reg |= ITCLK | ITFS | TFSR | LATFS | LTFS;
  507. chip->baud = bfin_sport_hz_to_spi_baud(spi->max_speed_hz);
  508. chip->cs_gpio = spi->chip_select;
  509. ret = gpio_request(chip->cs_gpio, spi->modalias);
  510. if (ret)
  511. goto error;
  512. dev_dbg(&spi->dev, "setup spi chip %s, width is %d\n",
  513. spi->modalias, spi->bits_per_word);
  514. dev_dbg(&spi->dev, "ctl_reg is 0x%x, GPIO is %i\n",
  515. chip->ctl_reg, spi->chip_select);
  516. spi_set_ctldata(spi, chip);
  517. bfin_sport_spi_cs_deactive(chip);
  518. return ret;
  519. error:
  520. kfree(first);
  521. return ret;
  522. }
  523. /*
  524. * callback for spi framework.
  525. * clean driver specific data
  526. */
  527. static void
  528. bfin_sport_spi_cleanup(struct spi_device *spi)
  529. {
  530. struct bfin_sport_spi_slave_data *chip = spi_get_ctldata(spi);
  531. if (!chip)
  532. return;
  533. gpio_free(chip->cs_gpio);
  534. kfree(chip);
  535. }
  536. static int
  537. bfin_sport_spi_init_queue(struct bfin_sport_spi_master_data *drv_data)
  538. {
  539. INIT_LIST_HEAD(&drv_data->queue);
  540. spin_lock_init(&drv_data->lock);
  541. drv_data->run = false;
  542. drv_data->busy = 0;
  543. /* init transfer tasklet */
  544. tasklet_init(&drv_data->pump_transfers,
  545. bfin_sport_spi_pump_transfers, (unsigned long)drv_data);
  546. INIT_WORK(&drv_data->pump_messages, bfin_sport_spi_pump_messages);
  547. return 0;
  548. }
  549. static int
  550. bfin_sport_spi_start_queue(struct bfin_sport_spi_master_data *drv_data)
  551. {
  552. unsigned long flags;
  553. spin_lock_irqsave(&drv_data->lock, flags);
  554. if (drv_data->run || drv_data->busy) {
  555. spin_unlock_irqrestore(&drv_data->lock, flags);
  556. return -EBUSY;
  557. }
  558. drv_data->run = true;
  559. drv_data->cur_msg = NULL;
  560. drv_data->cur_transfer = NULL;
  561. drv_data->cur_chip = NULL;
  562. spin_unlock_irqrestore(&drv_data->lock, flags);
  563. schedule_work(&drv_data->pump_messages);
  564. return 0;
  565. }
  566. static inline int
  567. bfin_sport_spi_stop_queue(struct bfin_sport_spi_master_data *drv_data)
  568. {
  569. unsigned long flags;
  570. unsigned limit = 500;
  571. int status = 0;
  572. spin_lock_irqsave(&drv_data->lock, flags);
  573. /*
  574. * This is a bit lame, but is optimized for the common execution path.
  575. * A wait_queue on the drv_data->busy could be used, but then the common
  576. * execution path (pump_messages) would be required to call wake_up or
  577. * friends on every SPI message. Do this instead
  578. */
  579. drv_data->run = false;
  580. while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
  581. spin_unlock_irqrestore(&drv_data->lock, flags);
  582. msleep(10);
  583. spin_lock_irqsave(&drv_data->lock, flags);
  584. }
  585. if (!list_empty(&drv_data->queue) || drv_data->busy)
  586. status = -EBUSY;
  587. spin_unlock_irqrestore(&drv_data->lock, flags);
  588. return status;
  589. }
  590. static inline int
  591. bfin_sport_spi_destroy_queue(struct bfin_sport_spi_master_data *drv_data)
  592. {
  593. int status;
  594. status = bfin_sport_spi_stop_queue(drv_data);
  595. if (status)
  596. return status;
  597. flush_work(&drv_data->pump_messages);
  598. return 0;
  599. }
  600. static int bfin_sport_spi_probe(struct platform_device *pdev)
  601. {
  602. struct device *dev = &pdev->dev;
  603. struct bfin5xx_spi_master *platform_info;
  604. struct spi_master *master;
  605. struct resource *res, *ires;
  606. struct bfin_sport_spi_master_data *drv_data;
  607. int status;
  608. platform_info = dev_get_platdata(dev);
  609. /* Allocate master with space for drv_data */
  610. master = spi_alloc_master(dev, sizeof(*master) + 16);
  611. if (!master) {
  612. dev_err(dev, "cannot alloc spi_master\n");
  613. return -ENOMEM;
  614. }
  615. drv_data = spi_master_get_devdata(master);
  616. drv_data->master = master;
  617. drv_data->dev = dev;
  618. drv_data->pin_req = platform_info->pin_req;
  619. master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
  620. master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
  621. master->bus_num = pdev->id;
  622. master->num_chipselect = platform_info->num_chipselect;
  623. master->cleanup = bfin_sport_spi_cleanup;
  624. master->setup = bfin_sport_spi_setup;
  625. master->transfer = bfin_sport_spi_transfer;
  626. /* Find and map our resources */
  627. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  628. if (res == NULL) {
  629. dev_err(dev, "cannot get IORESOURCE_MEM\n");
  630. status = -ENOENT;
  631. goto out_error_get_res;
  632. }
  633. drv_data->regs = ioremap(res->start, resource_size(res));
  634. if (drv_data->regs == NULL) {
  635. dev_err(dev, "cannot map registers\n");
  636. status = -ENXIO;
  637. goto out_error_ioremap;
  638. }
  639. ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
  640. if (!ires) {
  641. dev_err(dev, "cannot get IORESOURCE_IRQ\n");
  642. status = -ENODEV;
  643. goto out_error_get_ires;
  644. }
  645. drv_data->err_irq = ires->start;
  646. /* Initial and start queue */
  647. status = bfin_sport_spi_init_queue(drv_data);
  648. if (status) {
  649. dev_err(dev, "problem initializing queue\n");
  650. goto out_error_queue_alloc;
  651. }
  652. status = bfin_sport_spi_start_queue(drv_data);
  653. if (status) {
  654. dev_err(dev, "problem starting queue\n");
  655. goto out_error_queue_alloc;
  656. }
  657. status = request_irq(drv_data->err_irq, sport_err_handler,
  658. 0, "sport_spi_err", drv_data);
  659. if (status) {
  660. dev_err(dev, "unable to request sport err irq\n");
  661. goto out_error_irq;
  662. }
  663. status = peripheral_request_list(drv_data->pin_req, DRV_NAME);
  664. if (status) {
  665. dev_err(dev, "requesting peripherals failed\n");
  666. goto out_error_peripheral;
  667. }
  668. /* Register with the SPI framework */
  669. platform_set_drvdata(pdev, drv_data);
  670. status = spi_register_master(master);
  671. if (status) {
  672. dev_err(dev, "problem registering spi master\n");
  673. goto out_error_master;
  674. }
  675. dev_info(dev, "%s, regs_base@%p\n", DRV_DESC, drv_data->regs);
  676. return 0;
  677. out_error_master:
  678. peripheral_free_list(drv_data->pin_req);
  679. out_error_peripheral:
  680. free_irq(drv_data->err_irq, drv_data);
  681. out_error_irq:
  682. out_error_queue_alloc:
  683. bfin_sport_spi_destroy_queue(drv_data);
  684. out_error_get_ires:
  685. iounmap(drv_data->regs);
  686. out_error_ioremap:
  687. out_error_get_res:
  688. spi_master_put(master);
  689. return status;
  690. }
  691. /* stop hardware and remove the driver */
  692. static int bfin_sport_spi_remove(struct platform_device *pdev)
  693. {
  694. struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
  695. int status = 0;
  696. if (!drv_data)
  697. return 0;
  698. /* Remove the queue */
  699. status = bfin_sport_spi_destroy_queue(drv_data);
  700. if (status)
  701. return status;
  702. /* Disable the SSP at the peripheral and SOC level */
  703. bfin_sport_spi_disable(drv_data);
  704. /* Disconnect from the SPI framework */
  705. spi_unregister_master(drv_data->master);
  706. peripheral_free_list(drv_data->pin_req);
  707. return 0;
  708. }
  709. #ifdef CONFIG_PM_SLEEP
  710. static int bfin_sport_spi_suspend(struct device *dev)
  711. {
  712. struct bfin_sport_spi_master_data *drv_data = dev_get_drvdata(dev);
  713. int status;
  714. status = bfin_sport_spi_stop_queue(drv_data);
  715. if (status)
  716. return status;
  717. /* stop hardware */
  718. bfin_sport_spi_disable(drv_data);
  719. return status;
  720. }
  721. static int bfin_sport_spi_resume(struct device *dev)
  722. {
  723. struct bfin_sport_spi_master_data *drv_data = dev_get_drvdata(dev);
  724. int status;
  725. /* Enable the SPI interface */
  726. bfin_sport_spi_enable(drv_data);
  727. /* Start the queue running */
  728. status = bfin_sport_spi_start_queue(drv_data);
  729. if (status)
  730. dev_err(drv_data->dev, "problem resuming queue\n");
  731. return status;
  732. }
  733. static SIMPLE_DEV_PM_OPS(bfin_sport_spi_pm_ops, bfin_sport_spi_suspend,
  734. bfin_sport_spi_resume);
  735. #define BFIN_SPORT_SPI_PM_OPS (&bfin_sport_spi_pm_ops)
  736. #else
  737. #define BFIN_SPORT_SPI_PM_OPS NULL
  738. #endif
  739. static struct platform_driver bfin_sport_spi_driver = {
  740. .driver = {
  741. .name = DRV_NAME,
  742. .pm = BFIN_SPORT_SPI_PM_OPS,
  743. },
  744. .probe = bfin_sport_spi_probe,
  745. .remove = bfin_sport_spi_remove,
  746. };
  747. module_platform_driver(bfin_sport_spi_driver);