/*
 * OMAP DPLL clock support
 *
 * Copyright (C) 2013 Texas Instruments, Inc.
 *
 * Tero Kristo <t-kristo@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
17
18 #include <linux/clk.h>
19 #include <linux/clk-provider.h>
20 #include <linux/slab.h>
21 #include <linux/err.h>
22 #include <linux/of.h>
23 #include <linux/of_address.h>
24 #include <linux/clk/ti.h>
25 #include "clock.h"
26
27 #undef pr_fmt
28 #define pr_fmt(fmt) "%s: " fmt, __func__
29
#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
/*
 * Ops for DPLLs with REGM4XEN support; all rate queries/changes go
 * through the omap4_dpll_regm4xen_* helper variants.
 */
static const struct clk_ops dpll_m4xen_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.recalc_rate = &omap4_dpll_regm4xen_recalc,
	.round_rate = &omap4_dpll_regm4xen_round_rate,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap4_dpll_regm4xen_determine_rate,
	.get_parent = &omap2_init_dpll_parent,
};
#else
/* Empty stub so the CLK_OF_DECLARE users below still build on other SoCs */
static const struct clk_ops dpll_m4xen_ck_ops = {};
#endif
46
#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) || \
	defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX) || \
	defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
/* Core DPLL ops: rate/parent can be read but not changed from here */
static const struct clk_ops dpll_core_ck_ops = {
	.recalc_rate = &omap3_dpll_recalc,
	.get_parent = &omap2_init_dpll_parent,
};

/* Full ops for a gateable, re-rateable noncore DPLL */
static const struct clk_ops dpll_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.recalc_rate = &omap3_dpll_recalc,
	.round_rate = &omap2_dpll_round_rate,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
	.get_parent = &omap2_init_dpll_parent,
};

/* Same as dpll_ck_ops but without enable/disable callbacks */
static const struct clk_ops dpll_no_gate_ck_ops = {
	.recalc_rate = &omap3_dpll_recalc,
	.get_parent = &omap2_init_dpll_parent,
	.round_rate = &omap2_dpll_round_rate,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
};
#else
/* Empty stubs so references below still build on other SoC configs */
static const struct clk_ops dpll_core_ck_ops = {};
static const struct clk_ops dpll_ck_ops = {};
static const struct clk_ops dpll_no_gate_ck_ops = {};
const struct clk_hw_omap_ops clkhwops_omap3_dpll = {};
#endif
82
#ifdef CONFIG_ARCH_OMAP2
/* OMAP2 core DPLL ops; rate changes go via omap2_reprogram_dpllcore() */
static const struct clk_ops omap2_dpll_core_ck_ops = {
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap2_dpllcore_recalc,
	.round_rate = &omap2_dpll_round_rate,
	.set_rate = &omap2_reprogram_dpllcore,
};
#else
static const struct clk_ops omap2_dpll_core_ck_ops = {};
#endif
93
#ifdef CONFIG_ARCH_OMAP3
/* OMAP3 core DPLL ops: no set_rate — core DPLL rate is not changed here */
static const struct clk_ops omap3_dpll_core_ck_ops = {
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.round_rate = &omap2_dpll_round_rate,
};
#else
static const struct clk_ops omap3_dpll_core_ck_ops = {};
#endif
103
#ifdef CONFIG_ARCH_OMAP3
/* Standard OMAP3 noncore DPLL ops */
static const struct clk_ops omap3_dpll_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
	.round_rate = &omap2_dpll_round_rate,
};

/*
 * OMAP3 PER DPLL ops: identical to omap3_dpll_ck_ops except that rate
 * changes are routed through the dpll4-specific set_rate helpers.
 */
static const struct clk_ops omap3_dpll_per_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.set_rate = &omap3_dpll4_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_dpll4_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
	.round_rate = &omap2_dpll_round_rate,
};
#endif
129
/* DPLL "x2" output clocks only need rate recalculation */
static const struct clk_ops dpll_x2_ck_ops = {
	.recalc_rate = &omap3_clkoutx2_recalc,
};
133
134 /**
135 * _register_dpll - low level registration of a DPLL clock
136 * @hw: hardware clock definition for the clock
137 * @node: device node for the clock
138 *
139 * Finalizes DPLL registration process. In case a failure (clk-ref or
140 * clk-bypass is missing), the clock is added to retry list and
141 * the initialization is retried on later stage.
142 */
_register_dpll(struct clk_hw * hw,struct device_node * node)143 static void __init _register_dpll(struct clk_hw *hw,
144 struct device_node *node)
145 {
146 struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
147 struct dpll_data *dd = clk_hw->dpll_data;
148 struct clk *clk;
149
150 dd->clk_ref = of_clk_get(node, 0);
151 dd->clk_bypass = of_clk_get(node, 1);
152
153 if (IS_ERR(dd->clk_ref) || IS_ERR(dd->clk_bypass)) {
154 pr_debug("clk-ref or clk-bypass missing for %s, retry later\n",
155 node->name);
156 if (!ti_clk_retry_init(node, hw, _register_dpll))
157 return;
158
159 goto cleanup;
160 }
161
162 /* register the clock */
163 clk = clk_register(NULL, &clk_hw->hw);
164
165 if (!IS_ERR(clk)) {
166 omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
167 of_clk_add_provider(node, of_clk_src_simple_get, clk);
168 kfree(clk_hw->hw.init->parent_names);
169 kfree(clk_hw->hw.init);
170 return;
171 }
172
173 cleanup:
174 kfree(clk_hw->dpll_data);
175 kfree(clk_hw->hw.init->parent_names);
176 kfree(clk_hw->hw.init);
177 kfree(clk_hw);
178 }
179
180 #if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
_get_reg(u8 module,u16 offset)181 static void __iomem *_get_reg(u8 module, u16 offset)
182 {
183 u32 reg;
184 struct clk_omap_reg *reg_setup;
185
186 reg_setup = (struct clk_omap_reg *)®
187
188 reg_setup->index = module;
189 reg_setup->offset = offset;
190
191 return (void __iomem *)reg;
192 }
193
ti_clk_register_dpll(struct ti_clk * setup)194 struct clk *ti_clk_register_dpll(struct ti_clk *setup)
195 {
196 struct clk_hw_omap *clk_hw;
197 struct clk_init_data init = { NULL };
198 struct dpll_data *dd;
199 struct clk *clk;
200 struct ti_clk_dpll *dpll;
201 const struct clk_ops *ops = &omap3_dpll_ck_ops;
202 struct clk *clk_ref;
203 struct clk *clk_bypass;
204
205 dpll = setup->data;
206
207 if (dpll->num_parents < 2)
208 return ERR_PTR(-EINVAL);
209
210 clk_ref = clk_get_sys(NULL, dpll->parents[0]);
211 clk_bypass = clk_get_sys(NULL, dpll->parents[1]);
212
213 if (IS_ERR_OR_NULL(clk_ref) || IS_ERR_OR_NULL(clk_bypass))
214 return ERR_PTR(-EAGAIN);
215
216 dd = kzalloc(sizeof(*dd), GFP_KERNEL);
217 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
218 if (!dd || !clk_hw) {
219 clk = ERR_PTR(-ENOMEM);
220 goto cleanup;
221 }
222
223 clk_hw->dpll_data = dd;
224 clk_hw->ops = &clkhwops_omap3_dpll;
225 clk_hw->hw.init = &init;
226 clk_hw->flags = MEMMAP_ADDRESSING;
227
228 init.name = setup->name;
229 init.ops = ops;
230
231 init.num_parents = dpll->num_parents;
232 init.parent_names = dpll->parents;
233
234 dd->control_reg = _get_reg(dpll->module, dpll->control_reg);
235 dd->idlest_reg = _get_reg(dpll->module, dpll->idlest_reg);
236 dd->mult_div1_reg = _get_reg(dpll->module, dpll->mult_div1_reg);
237 dd->autoidle_reg = _get_reg(dpll->module, dpll->autoidle_reg);
238
239 dd->modes = dpll->modes;
240 dd->div1_mask = dpll->div1_mask;
241 dd->idlest_mask = dpll->idlest_mask;
242 dd->mult_mask = dpll->mult_mask;
243 dd->autoidle_mask = dpll->autoidle_mask;
244 dd->enable_mask = dpll->enable_mask;
245 dd->sddiv_mask = dpll->sddiv_mask;
246 dd->dco_mask = dpll->dco_mask;
247 dd->max_divider = dpll->max_divider;
248 dd->min_divider = dpll->min_divider;
249 dd->max_multiplier = dpll->max_multiplier;
250 dd->auto_recal_bit = dpll->auto_recal_bit;
251 dd->recal_en_bit = dpll->recal_en_bit;
252 dd->recal_st_bit = dpll->recal_st_bit;
253
254 dd->clk_ref = clk_ref;
255 dd->clk_bypass = clk_bypass;
256
257 if (dpll->flags & CLKF_CORE)
258 ops = &omap3_dpll_core_ck_ops;
259
260 if (dpll->flags & CLKF_PER)
261 ops = &omap3_dpll_per_ck_ops;
262
263 if (dpll->flags & CLKF_J_TYPE)
264 dd->flags |= DPLL_J_TYPE;
265
266 clk = clk_register(NULL, &clk_hw->hw);
267
268 if (!IS_ERR(clk))
269 return clk;
270
271 cleanup:
272 kfree(dd);
273 kfree(clk_hw);
274 return clk;
275 }
276 #endif
277
278 #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
279 defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
280 defined(CONFIG_SOC_AM43XX)
281 /**
282 * _register_dpll_x2 - Registers a DPLLx2 clock
283 * @node: device node for this clock
284 * @ops: clk_ops for this clock
285 * @hw_ops: clk_hw_ops for this clock
286 *
287 * Initializes a DPLL x 2 clock from device tree data.
288 */
_register_dpll_x2(struct device_node * node,const struct clk_ops * ops,const struct clk_hw_omap_ops * hw_ops)289 static void _register_dpll_x2(struct device_node *node,
290 const struct clk_ops *ops,
291 const struct clk_hw_omap_ops *hw_ops)
292 {
293 struct clk *clk;
294 struct clk_init_data init = { NULL };
295 struct clk_hw_omap *clk_hw;
296 const char *name = node->name;
297 const char *parent_name;
298
299 parent_name = of_clk_get_parent_name(node, 0);
300 if (!parent_name) {
301 pr_err("%s must have parent\n", node->name);
302 return;
303 }
304
305 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
306 if (!clk_hw)
307 return;
308
309 clk_hw->ops = hw_ops;
310 clk_hw->hw.init = &init;
311
312 init.name = name;
313 init.ops = ops;
314 init.parent_names = &parent_name;
315 init.num_parents = 1;
316
317 /* register the clock */
318 clk = clk_register(NULL, &clk_hw->hw);
319
320 if (IS_ERR(clk)) {
321 kfree(clk_hw);
322 } else {
323 omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
324 of_clk_add_provider(node, of_clk_src_simple_get, clk);
325 }
326 }
327 #endif
328
329 /**
330 * of_ti_dpll_setup - Setup function for OMAP DPLL clocks
331 * @node: device node containing the DPLL info
332 * @ops: ops for the DPLL
333 * @ddt: DPLL data template to use
334 *
335 * Initializes a DPLL clock from device tree data.
336 */
of_ti_dpll_setup(struct device_node * node,const struct clk_ops * ops,const struct dpll_data * ddt)337 static void __init of_ti_dpll_setup(struct device_node *node,
338 const struct clk_ops *ops,
339 const struct dpll_data *ddt)
340 {
341 struct clk_hw_omap *clk_hw = NULL;
342 struct clk_init_data *init = NULL;
343 const char **parent_names = NULL;
344 struct dpll_data *dd = NULL;
345 u8 dpll_mode = 0;
346
347 dd = kzalloc(sizeof(*dd), GFP_KERNEL);
348 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
349 init = kzalloc(sizeof(*init), GFP_KERNEL);
350 if (!dd || !clk_hw || !init)
351 goto cleanup;
352
353 memcpy(dd, ddt, sizeof(*dd));
354
355 clk_hw->dpll_data = dd;
356 clk_hw->ops = &clkhwops_omap3_dpll;
357 clk_hw->hw.init = init;
358 clk_hw->flags = MEMMAP_ADDRESSING;
359
360 init->name = node->name;
361 init->ops = ops;
362
363 init->num_parents = of_clk_get_parent_count(node);
364 if (init->num_parents < 1) {
365 pr_err("%s must have parent(s)\n", node->name);
366 goto cleanup;
367 }
368
369 parent_names = kzalloc(sizeof(char *) * init->num_parents, GFP_KERNEL);
370 if (!parent_names)
371 goto cleanup;
372
373 of_clk_parent_fill(node, parent_names, init->num_parents);
374
375 init->parent_names = parent_names;
376
377 dd->control_reg = ti_clk_get_reg_addr(node, 0);
378
379 /*
380 * Special case for OMAP2 DPLL, register order is different due to
381 * missing idlest_reg, also clkhwops is different. Detected from
382 * missing idlest_mask.
383 */
384 if (!dd->idlest_mask) {
385 dd->mult_div1_reg = ti_clk_get_reg_addr(node, 1);
386 #ifdef CONFIG_ARCH_OMAP2
387 clk_hw->ops = &clkhwops_omap2xxx_dpll;
388 omap2xxx_clkt_dpllcore_init(&clk_hw->hw);
389 #endif
390 } else {
391 dd->idlest_reg = ti_clk_get_reg_addr(node, 1);
392 if (IS_ERR(dd->idlest_reg))
393 goto cleanup;
394
395 dd->mult_div1_reg = ti_clk_get_reg_addr(node, 2);
396 }
397
398 if (IS_ERR(dd->control_reg) || IS_ERR(dd->mult_div1_reg))
399 goto cleanup;
400
401 if (dd->autoidle_mask) {
402 dd->autoidle_reg = ti_clk_get_reg_addr(node, 3);
403 if (IS_ERR(dd->autoidle_reg))
404 goto cleanup;
405 }
406
407 if (of_property_read_bool(node, "ti,low-power-stop"))
408 dpll_mode |= 1 << DPLL_LOW_POWER_STOP;
409
410 if (of_property_read_bool(node, "ti,low-power-bypass"))
411 dpll_mode |= 1 << DPLL_LOW_POWER_BYPASS;
412
413 if (of_property_read_bool(node, "ti,lock"))
414 dpll_mode |= 1 << DPLL_LOCKED;
415
416 if (dpll_mode)
417 dd->modes = dpll_mode;
418
419 _register_dpll(&clk_hw->hw, node);
420 return;
421
422 cleanup:
423 kfree(dd);
424 kfree(parent_names);
425 kfree(init);
426 kfree(clk_hw);
427 }
428
#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
/* DT hook for "ti,omap4-dpll-x2-clock"; uses the omap4 dpllmx hwops */
static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
{
	_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
}
CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
	       of_ti_omap4_dpll_x2_setup);
#endif
438
#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
/* DT hook for "ti,am3-dpll-x2-clock"; no special hwops needed */
static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
{
	_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
}
CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock",
	       of_ti_am3_dpll_x2_setup);
#endif
447
#ifdef CONFIG_ARCH_OMAP3
/* DT setup for "ti,omap3-dpll-clock" */
static void __init of_ti_omap3_dpll_setup(struct device_node *node)
{
	/* Template only; register addresses come from of_ti_dpll_setup() */
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf0,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
	       of_ti_omap3_dpll_setup);
468
/* DT setup for "ti,omap3-dpll-core-clock"; read-only core DPLL ops */
static void __init of_ti_omap3_core_dpll_setup(struct device_node *node)
{
	/* Template only; register addresses come from of_ti_dpll_setup() */
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 16,
		.div1_mask = 0x7f << 8,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf0,
	};

	of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_core_dpll_clock, "ti,omap3-dpll-core-clock",
	       of_ti_omap3_core_dpll_setup);
487
/* DT setup for "ti,omap3-dpll-per-clock"; uses the dpll4-specific ops */
static void __init of_ti_omap3_per_dpll_setup(struct device_node *node)
{
	/* Template only; register addresses come from of_ti_dpll_setup() */
	const struct dpll_data dd = {
		.idlest_mask = 0x1 << 1,
		.enable_mask = 0x7 << 16,
		.autoidle_mask = 0x7 << 3,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf00000,
		.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_dpll_clock, "ti,omap3-dpll-per-clock",
	       of_ti_omap3_per_dpll_setup);
507
/* DT setup for "ti,omap3-dpll-per-j-type-clock" (J-type: wider mult, sigma-delta div) */
static void __init of_ti_omap3_per_jtype_dpll_setup(struct device_node *node)
{
	/* Template only; register addresses come from of_ti_dpll_setup() */
	const struct dpll_data dd = {
		.idlest_mask = 0x1 << 1,
		.enable_mask = 0x7 << 16,
		.autoidle_mask = 0x7 << 3,
		.mult_mask = 0xfff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 4095,
		.max_divider = 128,
		.min_divider = 1,
		.sddiv_mask = 0xff << 24,
		.dco_mask = 0xe << 20,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_jtype_dpll_clock, "ti,omap3-dpll-per-j-type-clock",
	       of_ti_omap3_per_jtype_dpll_setup);
#endif
530
/* DT setup for "ti,omap4-dpll-clock" */
static void __init of_ti_omap4_dpll_setup(struct device_node *node)
{
	/* Template only; register addresses come from of_ti_dpll_setup() */
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_dpll_clock, "ti,omap4-dpll-clock",
	       of_ti_omap4_dpll_setup);
549
/* DT setup for "ti,omap5-mpu-dpll-clock"; adds duty-cycle-correction data */
static void __init of_ti_omap5_mpu_dpll_setup(struct device_node *node)
{
	/* Template only; register addresses come from of_ti_dpll_setup() */
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.dcc_mask = BIT(22),
		.dcc_rate = 1400000000, /* DCC beyond 1.4GHz */
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(of_ti_omap5_mpu_dpll_clock, "ti,omap5-mpu-dpll-clock",
	       of_ti_omap5_mpu_dpll_setup);
570
/* DT setup for "ti,omap4-dpll-core-clock"; read-only core DPLL ops */
static void __init of_ti_omap4_core_dpll_setup(struct device_node *node)
{
	/* Template only; register addresses come from of_ti_dpll_setup() */
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_core_dpll_clock, "ti,omap4-dpll-core-clock",
	       of_ti_omap4_core_dpll_setup);
589
#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
/* DT setup for "ti,omap4-dpll-m4xen-clock"; includes M4XEN/low-power masks */
static void __init of_ti_omap4_m4xen_dpll_setup(struct device_node *node)
{
	/* Template only; register addresses come from of_ti_dpll_setup() */
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.m4xen_mask = 0x800,
		.lpmode_mask = 1 << 10,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_m4xen_dpll_clock, "ti,omap4-dpll-m4xen-clock",
	       of_ti_omap4_m4xen_dpll_setup);
612
/* DT setup for "ti,omap4-dpll-j-type-clock" (wider mult/div, sigma-delta div) */
static void __init of_ti_omap4_jtype_dpll_setup(struct device_node *node)
{
	/* Template only; register addresses come from of_ti_dpll_setup() */
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0xfff << 8,
		.div1_mask = 0xff,
		.max_multiplier = 4095,
		.max_divider = 256,
		.min_divider = 1,
		.sddiv_mask = 0xff << 24,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_jtype_dpll_clock, "ti,omap4-dpll-j-type-clock",
	       of_ti_omap4_jtype_dpll_setup);
#endif
634
/* DT setup for "ti,am3-dpll-no-gate-clock"; ops without enable/disable */
static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node)
{
	/* Template only; register addresses come from of_ti_dpll_setup() */
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_dpll_clock, "ti,am3-dpll-no-gate-clock",
	       of_ti_am3_no_gate_dpll_setup);
652
/* DT setup for "ti,am3-dpll-j-type-clock"; note min_divider is 2 here */
static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node)
{
	/* Template only; register addresses come from of_ti_dpll_setup() */
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 4095,
		.max_divider = 256,
		.min_divider = 2,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_jtype_dpll_clock, "ti,am3-dpll-j-type-clock",
	       of_ti_am3_jtype_dpll_setup);
671
/* DT setup for "ti,am3-dpll-no-gate-j-type-clock" */
static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node)
{
	/* Template only; register addresses come from of_ti_dpll_setup() */
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_jtype_dpll_clock,
	       "ti,am3-dpll-no-gate-j-type-clock",
	       of_ti_am3_no_gate_jtype_dpll_setup);
691
/* DT setup for "ti,am3-dpll-clock" */
static void __init of_ti_am3_dpll_setup(struct device_node *node)
{
	/* Template only; register addresses come from of_ti_dpll_setup() */
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_dpll_clock, "ti,am3-dpll-clock", of_ti_am3_dpll_setup);
708
/* DT setup for "ti,am3-dpll-core-clock"; read-only core DPLL ops */
static void __init of_ti_am3_core_dpll_setup(struct device_node *node)
{
	/* Template only; register addresses come from of_ti_dpll_setup() */
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_core_dpll_clock, "ti,am3-dpll-core-clock",
	       of_ti_am3_core_dpll_setup);
726
/*
 * DT setup for "ti,omap2-dpll-core-clock". No idlest_mask here: its
 * absence makes of_ti_dpll_setup() take the OMAP2 special-case path
 * (different register order and clkhwops).
 */
static void __init of_ti_omap2_core_dpll_setup(struct device_node *node)
{
	/* Template only; register addresses come from of_ti_dpll_setup() */
	const struct dpll_data dd = {
		.enable_mask = 0x3,
		.mult_mask = 0x3ff << 12,
		.div1_mask = 0xf << 8,
		.max_divider = 16,
		.min_divider = 1,
	};

	of_ti_dpll_setup(node, &omap2_dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap2_core_dpll_clock, "ti,omap2-dpll-core-clock",
	       of_ti_omap2_core_dpll_setup);