cgu.c

/*
 * Ingenic SoC CGU driver
 *
 * Copyright (c) 2013-2015 Imagination Technologies
 * Author: Paul Burton <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "cgu.h"

#define MHZ (1000 * 1000)

/**
 * ingenic_cgu_gate_get() - get the value of clock gate register bit
 * @cgu: reference to the CGU whose registers should be read
 * @info: info struct describing the gate bit
 *
 * Retrieves the state of the clock gate bit described by info. The
 * caller must hold cgu->lock.
 *
 * Return: true if the gate bit is set, else false.
 */
static inline bool
ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
		     const struct ingenic_cgu_gate_info *info)
{
	return readl(cgu->base + info->reg) & BIT(info->bit);
}

/**
 * ingenic_cgu_gate_set() - set the value of clock gate register bit
 * @cgu: reference to the CGU whose registers should be modified
 * @info: info struct describing the gate bit
 * @val: non-zero to gate a clock, otherwise zero
 *
 * Sets the given gate bit in order to gate or ungate a clock.
 *
 * The caller must hold cgu->lock.
 */
static inline void
ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
		     const struct ingenic_cgu_gate_info *info, bool val)
{
	u32 clkgr = readl(cgu->base + info->reg);

	if (val)
		clkgr |= BIT(info->bit);
	else
		clkgr &= ~BIT(info->bit);

	writel(clkgr, cgu->base + info->reg);
}

/*
 * PLL operations
 */
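
/*
 * The PLL output rate is derived from the parent (external) clock as
 * rate = parent_rate * M / (N * OD), where M, N and the OD encoding are
 * read back from the PLL control register. A bypassed PLL passes the
 * parent rate through unchanged; a disabled PLL reports a rate of 0.
 */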
static unsigned long
ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od_enc, od;
	bool bypass, enable;
	unsigned long flags;
	u32 ctl;

	clk_info = &cgu->clock_info[ingenic_clk->idx];
	BUG_ON(clk_info->type != CGU_CLK_PLL);
	pll_info = &clk_info->pll;

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);
	spin_unlock_irqrestore(&cgu->lock, flags);

	m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
	m += pll_info->m_offset;
	n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
	n += pll_info->n_offset;
	od_enc = ctl >> pll_info->od_shift;
	od_enc &= GENMASK(pll_info->od_bits - 1, 0);
	bypass = !!(ctl & BIT(pll_info->bypass_bit));
	enable = !!(ctl & BIT(pll_info->enable_bit));

	if (bypass)
		return parent_rate;

	if (!enable)
		return 0;

	for (od = 0; od < pll_info->od_max; od++) {
		if (pll_info->od_encoding[od] == od_enc)
			break;
	}
	BUG_ON(od == pll_info->od_max);
	od++;

	return div_u64((u64)parent_rate * m, n * od);
}
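
/*
 * Calculate the closest achievable PLL rate for a requested rate. OD is
 * fixed at 1; N is chosen as the largest input divider that keeps the
 * post-divider frequency at or above 10MHz (clamped to the register
 * field width), and M is then derived to approximate the requested rate.
 * The selected dividers are optionally returned through pm, pn and pod.
 */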
static unsigned long
ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
		 unsigned long rate, unsigned long parent_rate,
		 unsigned *pm, unsigned *pn, unsigned *pod)
{
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od;

	pll_info = &clk_info->pll;
	od = 1;

	/*
	 * The frequency after the input divider must be between 10 and 50 MHz.
	 * The highest divider yields the best resolution.
	 */
	n = parent_rate / (10 * MHZ);
	n = min_t(unsigned, n, 1 << clk_info->pll.n_bits);
	n = max_t(unsigned, n, pll_info->n_offset);

	m = (rate / MHZ) * od * n / (parent_rate / MHZ);
	m = min_t(unsigned, m, 1 << clk_info->pll.m_bits);
	m = max_t(unsigned, m, pll_info->m_offset);

	if (pm)
		*pm = m;
	if (pn)
		*pn = n;
	if (pod)
		*pod = od;

	return div_u64((u64)parent_rate * m, n * od);
}

static long
ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
		       unsigned long *prate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;

	clk_info = &cgu->clock_info[ingenic_clk->idx];
	BUG_ON(clk_info->type != CGU_CLK_PLL);

	return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
}
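
/*
 * Program the PLL with the M/N/OD values computed by ingenic_pll_calc(),
 * clear the bypass bit, enable the PLL and then poll the stable bit for
 * up to 100ms (1ms per iteration) before giving up with -EBUSY.
 */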
static int
ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	const unsigned timeout = 100;
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned long rate, flags;
	unsigned m, n, od, i;
	u32 ctl;

	clk_info = &cgu->clock_info[ingenic_clk->idx];
	BUG_ON(clk_info->type != CGU_CLK_PLL);
	pll_info = &clk_info->pll;

	rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
				&m, &n, &od);
	if (rate != req_rate)
		pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
			clk_info->name, req_rate, rate);

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
	ctl |= (m - pll_info->m_offset) << pll_info->m_shift;

	ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
	ctl |= (n - pll_info->n_offset) << pll_info->n_shift;

	ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
	ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;

	ctl &= ~BIT(pll_info->bypass_bit);
	ctl |= BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);

	/* wait for the PLL to stabilise */
	for (i = 0; i < timeout; i++) {
		ctl = readl(cgu->base + pll_info->reg);
		if (ctl & BIT(pll_info->stable_bit))
			break;
		mdelay(1);
	}

	spin_unlock_irqrestore(&cgu->lock, flags);

	if (i == timeout)
		return -EBUSY;

	return 0;
}

static const struct clk_ops ingenic_pll_ops = {
	.recalc_rate = ingenic_pll_recalc_rate,
	.round_rate = ingenic_pll_round_rate,
	.set_rate = ingenic_pll_set_rate,
};

/*
 * Operations for all non-PLL clocks
 */
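
/*
 * A single CGU clock may combine mux, divider, fixed-divider and gate
 * capabilities, so each operation below checks the relevant CGU_CLK_*
 * flag and falls back to sensible behaviour when the capability is
 * absent.
 */
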
static u8 ingenic_clk_get_parent(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	u32 reg;
	u8 i, hw_idx, idx = 0;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_MUX) {
		reg = readl(cgu->base + clk_info->mux.reg);
		hw_idx = (reg >> clk_info->mux.shift) &
			 GENMASK(clk_info->mux.bits - 1, 0);

		/*
		 * Convert the hardware index to the parent index by skipping
		 * over any -1's in the parents array.
		 */
		for (i = 0; i < hw_idx; i++) {
			if (clk_info->parents[i] != -1)
				idx++;
		}
	}

	return idx;
}

static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;
	u8 curr_idx, hw_idx, num_poss;
	u32 reg, mask;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_MUX) {
		/*
		 * Convert the parent index to the hardware index by adding
		 * 1 for any -1 in the parents array preceding the given
		 * index. That is, we want the index of idx'th entry in
		 * clk_info->parents which does not equal -1.
		 */
		hw_idx = curr_idx = 0;
		num_poss = 1 << clk_info->mux.bits;
		for (; hw_idx < num_poss; hw_idx++) {
			if (clk_info->parents[hw_idx] == -1)
				continue;
			if (curr_idx == idx)
				break;
			curr_idx++;
		}

		/* idx should always be a valid parent */
		BUG_ON(curr_idx != idx);

		mask = GENMASK(clk_info->mux.bits - 1, 0);
		mask <<= clk_info->mux.shift;

		spin_lock_irqsave(&cgu->lock, flags);

		/* write the register */
		reg = readl(cgu->base + clk_info->mux.reg);
		reg &= ~mask;
		reg |= hw_idx << clk_info->mux.shift;
		writel(reg, cgu->base + clk_info->mux.reg);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return 0;
	}

	return idx ? -EINVAL : 0;
}

static unsigned long
ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long rate = parent_rate;
	u32 div_reg, div;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_DIV) {
		div_reg = readl(cgu->base + clk_info->div.reg);
		div = (div_reg >> clk_info->div.shift) &
		      GENMASK(clk_info->div.bits - 1, 0);
		div += 1;
		div *= clk_info->div.div;

		rate /= div;
	}

	return rate;
}
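
/*
 * Compute the divider value that best approximates the requested rate,
 * rounding the output rate down (i.e. the divider up), clamping it to
 * what the register field can hold and truncating it to a multiple of
 * clk_info->div.div, since the value written to the register is
 * (div / clk_info->div.div) - 1.
 */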
static unsigned
ingenic_clk_calc_div(const struct ingenic_cgu_clk_info *clk_info,
		     unsigned long parent_rate, unsigned long req_rate)
{
	unsigned div;

	/* calculate the divide */
	div = DIV_ROUND_UP(parent_rate, req_rate);

	/* and impose hardware constraints */
	div = min_t(unsigned, div, 1 << clk_info->div.bits);
	div = max_t(unsigned, div, 1);

	/*
	 * If the divider value itself must be divided before being written to
	 * the divider register, we must ensure we don't have any bits set that
	 * would be lost as a result of doing so.
	 */
	div /= clk_info->div.div;
	div *= clk_info->div.div;

	return div;
}

static long
ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
		       unsigned long *parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned int div = 1;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_DIV)
		div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
	else if (clk_info->type & CGU_CLK_FIXDIV)
		div = clk_info->fixdiv.div;

	return DIV_ROUND_UP(*parent_rate, div);
}

static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	const unsigned timeout = 100;
	unsigned long rate, flags;
	unsigned div, i;
	u32 reg, mask;
	int ret = 0;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_DIV) {
		div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
		rate = DIV_ROUND_UP(parent_rate, div);

		if (rate != req_rate)
			return -EINVAL;

		spin_lock_irqsave(&cgu->lock, flags);
		reg = readl(cgu->base + clk_info->div.reg);

		/* update the divide */
		mask = GENMASK(clk_info->div.bits - 1, 0);
		reg &= ~(mask << clk_info->div.shift);
		reg |= ((div / clk_info->div.div) - 1) << clk_info->div.shift;

		/* clear the stop bit */
		if (clk_info->div.stop_bit != -1)
			reg &= ~BIT(clk_info->div.stop_bit);

		/* set the change enable bit */
		if (clk_info->div.ce_bit != -1)
			reg |= BIT(clk_info->div.ce_bit);

		/* update the hardware */
		writel(reg, cgu->base + clk_info->div.reg);

		/* wait for the change to take effect */
		if (clk_info->div.busy_bit != -1) {
			for (i = 0; i < timeout; i++) {
				reg = readl(cgu->base + clk_info->div.reg);
				if (!(reg & BIT(clk_info->div.busy_bit)))
					break;
				mdelay(1);
			}
			if (i == timeout)
				ret = -EBUSY;
		}

		spin_unlock_irqrestore(&cgu->lock, flags);
		return ret;
	}

	return -EINVAL;
}
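
/*
 * Note the polarity of the gate bits: a set bit in the clock gate
 * register stops the clock, so enable clears the bit, disable sets it,
 * and is_enabled reports the inverse of the gate bit. Clocks without a
 * gate are always reported as enabled.
 */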
static int ingenic_clk_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_GATE) {
		/* ungate the clock */
		spin_lock_irqsave(&cgu->lock, flags);
		ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
		spin_unlock_irqrestore(&cgu->lock, flags);
	}

	return 0;
}

static void ingenic_clk_disable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_GATE) {
		/* gate the clock */
		spin_lock_irqsave(&cgu->lock, flags);
		ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
		spin_unlock_irqrestore(&cgu->lock, flags);
	}
}

static int ingenic_clk_is_enabled(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;
	int enabled = 1;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_GATE) {
		spin_lock_irqsave(&cgu->lock, flags);
		enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
		spin_unlock_irqrestore(&cgu->lock, flags);
	}

	return enabled;
}

static const struct clk_ops ingenic_clk_ops = {
	.get_parent = ingenic_clk_get_parent,
	.set_parent = ingenic_clk_set_parent,

	.recalc_rate = ingenic_clk_recalc_rate,
	.round_rate = ingenic_clk_round_rate,
	.set_rate = ingenic_clk_set_rate,

	.enable = ingenic_clk_enable,
	.disable = ingenic_clk_disable,
	.is_enabled = ingenic_clk_is_enabled,
};

/*
 * Setup functions.
 */
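
/*
 * Register the clock at the given index in the CGU's clock_info table.
 * External clocks are simply looked up by name from the device tree;
 * everything else gets an ingenic_clk wrapper whose clk_ops are chosen
 * from the type flags (custom ops, the PLL ops above, or the generic
 * mux/div/gate ops), with the possible parents gathered from the
 * previously registered clocks.
 */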
static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
{
	const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
	struct clk_init_data clk_init;
	struct ingenic_clk *ingenic_clk = NULL;
	struct clk *clk, *parent;
	const char *parent_names[4];
	unsigned caps, i, num_possible;
	int err = -EINVAL;

	BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));

	if (clk_info->type == CGU_CLK_EXT) {
		clk = of_clk_get_by_name(cgu->np, clk_info->name);
		if (IS_ERR(clk)) {
			pr_err("%s: no external clock '%s' provided\n",
			       __func__, clk_info->name);
			err = -ENODEV;
			goto out;
		}
		err = clk_register_clkdev(clk, clk_info->name, NULL);
		if (err) {
			clk_put(clk);
			goto out;
		}
		cgu->clocks.clks[idx] = clk;
		return 0;
	}

	if (!clk_info->type) {
		pr_err("%s: no clock type specified for '%s'\n", __func__,
		       clk_info->name);
		goto out;
	}

	ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
	if (!ingenic_clk) {
		err = -ENOMEM;
		goto out;
	}

	ingenic_clk->hw.init = &clk_init;
	ingenic_clk->cgu = cgu;
	ingenic_clk->idx = idx;

	clk_init.name = clk_info->name;
	clk_init.flags = 0;
	clk_init.parent_names = parent_names;

	caps = clk_info->type;

	if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
		clk_init.num_parents = 0;

		if (caps & CGU_CLK_MUX)
			num_possible = 1 << clk_info->mux.bits;
		else
			num_possible = ARRAY_SIZE(clk_info->parents);

		for (i = 0; i < num_possible; i++) {
			if (clk_info->parents[i] == -1)
				continue;

			parent = cgu->clocks.clks[clk_info->parents[i]];
			parent_names[clk_init.num_parents] =
				__clk_get_name(parent);
			clk_init.num_parents++;
		}

		BUG_ON(!clk_init.num_parents);
		BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
	} else {
		BUG_ON(clk_info->parents[0] == -1);
		clk_init.num_parents = 1;
		parent = cgu->clocks.clks[clk_info->parents[0]];
		parent_names[0] = __clk_get_name(parent);
	}

	if (caps & CGU_CLK_CUSTOM) {
		clk_init.ops = clk_info->custom.clk_ops;

		caps &= ~CGU_CLK_CUSTOM;

		if (caps) {
			pr_err("%s: custom clock may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else if (caps & CGU_CLK_PLL) {
		clk_init.ops = &ingenic_pll_ops;

		caps &= ~CGU_CLK_PLL;

		if (caps) {
			pr_err("%s: PLL may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else {
		clk_init.ops = &ingenic_clk_ops;
	}

	/* nothing to do for gates or fixed dividers */
	caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);

	if (caps & CGU_CLK_MUX) {
		if (!(caps & CGU_CLK_MUX_GLITCHFREE))
			clk_init.flags |= CLK_SET_PARENT_GATE;

		caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
	}

	if (caps & CGU_CLK_DIV) {
		caps &= ~CGU_CLK_DIV;
	} else {
		/* pass rate changes to the parent clock */
		clk_init.flags |= CLK_SET_RATE_PARENT;
	}

	if (caps) {
		pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
		goto out;
	}

	clk = clk_register(NULL, &ingenic_clk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock '%s'\n", __func__,
		       clk_info->name);
		err = PTR_ERR(clk);
		goto out;
	}

	err = clk_register_clkdev(clk, clk_info->name, NULL);
	if (err)
		goto out;

	cgu->clocks.clks[idx] = clk;
out:
	if (err)
		kfree(ingenic_clk);
	return err;
}
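
/*
 * Allocate a CGU instance covering num_clocks clocks described by
 * clock_info, and map its registers from the given device tree node.
 * Returns NULL on failure.
 */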
struct ingenic_cgu *
ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
		unsigned num_clocks, struct device_node *np)
{
	struct ingenic_cgu *cgu;

	cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
	if (!cgu)
		goto err_out;

	cgu->base = of_iomap(np, 0);
	if (!cgu->base) {
		pr_err("%s: failed to map CGU registers\n", __func__);
		goto err_out_free;
	}

	cgu->np = np;
	cgu->clock_info = clock_info;
	cgu->clocks.clk_num = num_clocks;

	spin_lock_init(&cgu->lock);

	return cgu;

err_out_free:
	kfree(cgu);
err_out:
	return NULL;
}
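
/*
 * Register every clock described by the CGU with the common clock
 * framework and expose them as a DT clock provider via
 * of_clk_src_onecell_get. On error, any clocks registered so far are
 * released again.
 */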
int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
{
	unsigned i;
	int err;

	cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
				   GFP_KERNEL);
	if (!cgu->clocks.clks) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < cgu->clocks.clk_num; i++) {
		err = ingenic_register_clock(cgu, i);
		if (err)
			goto err_out_unregister;
	}

	err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
				  &cgu->clocks);
	if (err)
		goto err_out_unregister;

	return 0;

err_out_unregister:
	for (i = 0; i < cgu->clocks.clk_num; i++) {
		if (!cgu->clocks.clks[i])
			continue;
		if (cgu->clock_info[i].type & CGU_CLK_EXT)
			clk_put(cgu->clocks.clks[i]);
		else
			clk_unregister(cgu->clocks.clks[i]);
	}
	kfree(cgu->clocks.clks);
err_out:
	return err;
}
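
/*
 * Typical usage from a SoC-specific CGU driver (an illustrative sketch
 * only: the foo_cgu_clocks table, FOO_CLK_EXT index and "ingenic,foo-cgu"
 * compatible string below are hypothetical, not part of this file):
 *
 *	static const struct ingenic_cgu_clk_info foo_cgu_clocks[] = {
 *		[FOO_CLK_EXT] = { "ext", CGU_CLK_EXT },
 *		... PLLs, muxes, dividers and gates ...
 *	};
 *
 *	static void __init foo_cgu_init(struct device_node *np)
 *	{
 *		struct ingenic_cgu *cgu;
 *
 *		cgu = ingenic_cgu_new(foo_cgu_clocks,
 *				      ARRAY_SIZE(foo_cgu_clocks), np);
 *		if (!cgu) {
 *			pr_err("failed to initialise CGU\n");
 *			return;
 *		}
 *
 *		if (ingenic_cgu_register_clocks(cgu))
 *			pr_err("failed to register CGU clocks\n");
 *	}
 *	CLK_OF_DECLARE(foo_cgu, "ingenic,foo-cgu", foo_cgu_init);
 */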