1/*
2 * OMAP DPLL clock support
3 *
4 * Copyright (C) 2013 Texas Instruments, Inc.
5 *
6 * Tero Kristo <t-kristo@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
13 * kind, whether express or implied; without even the implied warranty
14 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/clk-provider.h>
19#include <linux/slab.h>
20#include <linux/err.h>
21#include <linux/of.h>
22#include <linux/of_address.h>
23#include <linux/clk/ti.h>
24#include "clock.h"
25
26#undef pr_fmt
27#define pr_fmt(fmt) "%s: " fmt, __func__
28
#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
/*
 * clk_ops for OMAP4-style DPLLs with the REGM4XEN multiplier extension;
 * rate calculation goes through the regm4xen-aware recalc/round/determine
 * hooks while gating and parenting use the common noncore DPLL code.
 */
static const struct clk_ops dpll_m4xen_ck_ops = {
	.enable		= &omap3_noncore_dpll_enable,
	.disable	= &omap3_noncore_dpll_disable,
	.recalc_rate	= &omap4_dpll_regm4xen_recalc,
	.round_rate	= &omap4_dpll_regm4xen_round_rate,
	.set_rate	= &omap3_noncore_dpll_set_rate,
	.set_parent	= &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent	= &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate	= &omap4_dpll_regm4xen_determine_rate,
	.get_parent	= &omap2_init_dpll_parent,
};
#else
/* Empty stub keeps references buildable when no matching SoC is selected */
static const struct clk_ops dpll_m4xen_ck_ops = {};
#endif
45
#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) || \
	defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX) || \
	defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
/* Core DPLL: rate is only read back (recalc), never gated or set here */
static const struct clk_ops dpll_core_ck_ops = {
	.recalc_rate	= &omap3_dpll_recalc,
	.get_parent	= &omap2_init_dpll_parent,
};

/* Fully controllable noncore DPLL: gate, rate and parent can all change */
static const struct clk_ops dpll_ck_ops = {
	.enable		= &omap3_noncore_dpll_enable,
	.disable	= &omap3_noncore_dpll_disable,
	.recalc_rate	= &omap3_dpll_recalc,
	.round_rate	= &omap2_dpll_round_rate,
	.set_rate	= &omap3_noncore_dpll_set_rate,
	.set_parent	= &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent	= &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate	= &omap3_noncore_dpll_determine_rate,
	.get_parent	= &omap2_init_dpll_parent,
};

/* Same as dpll_ck_ops but without enable/disable: the DPLL is never gated */
static const struct clk_ops dpll_no_gate_ck_ops = {
	.recalc_rate	= &omap3_dpll_recalc,
	.get_parent	= &omap2_init_dpll_parent,
	.round_rate	= &omap2_dpll_round_rate,
	.set_rate	= &omap3_noncore_dpll_set_rate,
	.set_parent	= &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent	= &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate	= &omap3_noncore_dpll_determine_rate,
};
#else
/* Empty stubs keep the references buildable on other SoC configurations */
static const struct clk_ops dpll_core_ck_ops = {};
static const struct clk_ops dpll_ck_ops = {};
static const struct clk_ops dpll_no_gate_ck_ops = {};
const struct clk_hw_omap_ops clkhwops_omap3_dpll = {};
#endif
81
#ifdef CONFIG_ARCH_OMAP2
/* OMAP2 core DPLL: rate can be reprogrammed, but the DPLL is never gated */
static const struct clk_ops omap2_dpll_core_ck_ops = {
	.get_parent	= &omap2_init_dpll_parent,
	.recalc_rate	= &omap2_dpllcore_recalc,
	.round_rate	= &omap2_dpll_round_rate,
	.set_rate	= &omap2_reprogram_dpllcore,
};
#else
static const struct clk_ops omap2_dpll_core_ck_ops = {};
#endif
92
#ifdef CONFIG_ARCH_OMAP3
/* OMAP3 core DPLL: rate can be read and rounded but not set or gated */
static const struct clk_ops omap3_dpll_core_ck_ops = {
	.get_parent	= &omap2_init_dpll_parent,
	.recalc_rate	= &omap3_dpll_recalc,
	.round_rate	= &omap2_dpll_round_rate,
};
#else
static const struct clk_ops omap3_dpll_core_ck_ops = {};
#endif
102
#ifdef CONFIG_ARCH_OMAP3
/* OMAP3 noncore DPLL: fully controllable (gate, rate and parent) */
static const struct clk_ops omap3_dpll_ck_ops = {
	.enable		= &omap3_noncore_dpll_enable,
	.disable	= &omap3_noncore_dpll_disable,
	.get_parent	= &omap2_init_dpll_parent,
	.recalc_rate	= &omap3_dpll_recalc,
	.set_rate	= &omap3_noncore_dpll_set_rate,
	.set_parent	= &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent	= &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate	= &omap3_noncore_dpll_determine_rate,
	.round_rate	= &omap2_dpll_round_rate,
};

/*
 * OMAP3 PER (DPLL4) variant: identical to omap3_dpll_ck_ops except that
 * rate changes go through the DPLL4-specific set_rate hooks.
 */
static const struct clk_ops omap3_dpll_per_ck_ops = {
	.enable		= &omap3_noncore_dpll_enable,
	.disable	= &omap3_noncore_dpll_disable,
	.get_parent	= &omap2_init_dpll_parent,
	.recalc_rate	= &omap3_dpll_recalc,
	.set_rate	= &omap3_dpll4_set_rate,
	.set_parent	= &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent	= &omap3_dpll4_set_rate_and_parent,
	.determine_rate	= &omap3_noncore_dpll_determine_rate,
	.round_rate	= &omap2_dpll_round_rate,
};
#endif
128
/* DPLL clkoutx2 output: derived rate only, nothing to gate or configure */
static const struct clk_ops dpll_x2_ck_ops = {
	.recalc_rate	= &omap3_clkoutx2_recalc,
};
132
133/**
134 * _register_dpll - low level registration of a DPLL clock
135 * @hw: hardware clock definition for the clock
136 * @node: device node for the clock
137 *
138 * Finalizes DPLL registration process. In case a failure (clk-ref or
139 * clk-bypass is missing), the clock is added to retry list and
140 * the initialization is retried on later stage.
141 */
142static void __init _register_dpll(struct clk_hw *hw,
143				  struct device_node *node)
144{
145	struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
146	struct dpll_data *dd = clk_hw->dpll_data;
147	struct clk *clk;
148
149	dd->clk_ref = of_clk_get(node, 0);
150	dd->clk_bypass = of_clk_get(node, 1);
151
152	if (IS_ERR(dd->clk_ref) || IS_ERR(dd->clk_bypass)) {
153		pr_debug("clk-ref or clk-bypass missing for %s, retry later\n",
154			 node->name);
155		if (!ti_clk_retry_init(node, hw, _register_dpll))
156			return;
157
158		goto cleanup;
159	}
160
161	/* register the clock */
162	clk = clk_register(NULL, &clk_hw->hw);
163
164	if (!IS_ERR(clk)) {
165		omap2_init_clk_hw_omap_clocks(clk);
166		of_clk_add_provider(node, of_clk_src_simple_get, clk);
167		kfree(clk_hw->hw.init->parent_names);
168		kfree(clk_hw->hw.init);
169		return;
170	}
171
172cleanup:
173	kfree(clk_hw->dpll_data);
174	kfree(clk_hw->hw.init->parent_names);
175	kfree(clk_hw->hw.init);
176	kfree(clk_hw);
177}
178
179#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
/*
 * _get_reg - pack a module index and register offset into a pseudo address
 * @module: clock module index the register belongs to
 * @offset: register offset within that module
 *
 * The returned value is not a directly dereferenceable MMIO address: a
 * struct clk_omap_reg is overlaid on a u32 and the packed value is cast
 * to a pointer, to be decoded later by the low-level register accessors
 * (MEMMAP_ADDRESSING scheme).
 * NOTE(review): assumes struct clk_omap_reg fits in 32 bits and that the
 * packed u32 survives the round-trip through void __iomem * on this
 * (32-bit OMAP3) platform - confirm before reusing elsewhere.
 */
void __iomem *_get_reg(u8 module, u16 offset)
{
	u32 reg;
	struct clk_omap_reg *reg_setup;

	reg_setup = (struct clk_omap_reg *)&reg;

	reg_setup->index = module;
	reg_setup->offset = offset;

	return (void __iomem *)reg;
}
192
193struct clk *ti_clk_register_dpll(struct ti_clk *setup)
194{
195	struct clk_hw_omap *clk_hw;
196	struct clk_init_data init = { NULL };
197	struct dpll_data *dd;
198	struct clk *clk;
199	struct ti_clk_dpll *dpll;
200	const struct clk_ops *ops = &omap3_dpll_ck_ops;
201	struct clk *clk_ref;
202	struct clk *clk_bypass;
203
204	dpll = setup->data;
205
206	if (dpll->num_parents < 2)
207		return ERR_PTR(-EINVAL);
208
209	clk_ref = clk_get_sys(NULL, dpll->parents[0]);
210	clk_bypass = clk_get_sys(NULL, dpll->parents[1]);
211
212	if (IS_ERR_OR_NULL(clk_ref) || IS_ERR_OR_NULL(clk_bypass))
213		return ERR_PTR(-EAGAIN);
214
215	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
216	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
217	if (!dd || !clk_hw) {
218		clk = ERR_PTR(-ENOMEM);
219		goto cleanup;
220	}
221
222	clk_hw->dpll_data = dd;
223	clk_hw->ops = &clkhwops_omap3_dpll;
224	clk_hw->hw.init = &init;
225	clk_hw->flags = MEMMAP_ADDRESSING;
226
227	init.name = setup->name;
228	init.ops = ops;
229
230	init.num_parents = dpll->num_parents;
231	init.parent_names = dpll->parents;
232
233	dd->control_reg = _get_reg(dpll->module, dpll->control_reg);
234	dd->idlest_reg = _get_reg(dpll->module, dpll->idlest_reg);
235	dd->mult_div1_reg = _get_reg(dpll->module, dpll->mult_div1_reg);
236	dd->autoidle_reg = _get_reg(dpll->module, dpll->autoidle_reg);
237
238	dd->modes = dpll->modes;
239	dd->div1_mask = dpll->div1_mask;
240	dd->idlest_mask = dpll->idlest_mask;
241	dd->mult_mask = dpll->mult_mask;
242	dd->autoidle_mask = dpll->autoidle_mask;
243	dd->enable_mask = dpll->enable_mask;
244	dd->sddiv_mask = dpll->sddiv_mask;
245	dd->dco_mask = dpll->dco_mask;
246	dd->max_divider = dpll->max_divider;
247	dd->min_divider = dpll->min_divider;
248	dd->max_multiplier = dpll->max_multiplier;
249	dd->auto_recal_bit = dpll->auto_recal_bit;
250	dd->recal_en_bit = dpll->recal_en_bit;
251	dd->recal_st_bit = dpll->recal_st_bit;
252
253	dd->clk_ref = clk_ref;
254	dd->clk_bypass = clk_bypass;
255
256	if (dpll->flags & CLKF_CORE)
257		ops = &omap3_dpll_core_ck_ops;
258
259	if (dpll->flags & CLKF_PER)
260		ops = &omap3_dpll_per_ck_ops;
261
262	if (dpll->flags & CLKF_J_TYPE)
263		dd->flags |= DPLL_J_TYPE;
264
265	clk = clk_register(NULL, &clk_hw->hw);
266
267	if (!IS_ERR(clk))
268		return clk;
269
270cleanup:
271	kfree(dd);
272	kfree(clk_hw);
273	return clk;
274}
275#endif
276
277#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
278	defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
279	defined(CONFIG_SOC_AM43XX)
280/**
281 * _register_dpll_x2 - Registers a DPLLx2 clock
282 * @node: device node for this clock
283 * @ops: clk_ops for this clock
284 * @hw_ops: clk_hw_ops for this clock
285 *
286 * Initializes a DPLL x 2 clock from device tree data.
287 */
288static void _register_dpll_x2(struct device_node *node,
289			      const struct clk_ops *ops,
290			      const struct clk_hw_omap_ops *hw_ops)
291{
292	struct clk *clk;
293	struct clk_init_data init = { NULL };
294	struct clk_hw_omap *clk_hw;
295	const char *name = node->name;
296	const char *parent_name;
297
298	parent_name = of_clk_get_parent_name(node, 0);
299	if (!parent_name) {
300		pr_err("%s must have parent\n", node->name);
301		return;
302	}
303
304	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
305	if (!clk_hw)
306		return;
307
308	clk_hw->ops = hw_ops;
309	clk_hw->hw.init = &init;
310
311	init.name = name;
312	init.ops = ops;
313	init.parent_names = &parent_name;
314	init.num_parents = 1;
315
316	/* register the clock */
317	clk = clk_register(NULL, &clk_hw->hw);
318
319	if (IS_ERR(clk)) {
320		kfree(clk_hw);
321	} else {
322		omap2_init_clk_hw_omap_clocks(clk);
323		of_clk_add_provider(node, of_clk_src_simple_get, clk);
324	}
325}
326#endif
327
328/**
329 * of_ti_dpll_setup - Setup function for OMAP DPLL clocks
330 * @node: device node containing the DPLL info
331 * @ops: ops for the DPLL
332 * @ddt: DPLL data template to use
333 *
334 * Initializes a DPLL clock from device tree data.
335 */
336static void __init of_ti_dpll_setup(struct device_node *node,
337				    const struct clk_ops *ops,
338				    const struct dpll_data *ddt)
339{
340	struct clk_hw_omap *clk_hw = NULL;
341	struct clk_init_data *init = NULL;
342	const char **parent_names = NULL;
343	struct dpll_data *dd = NULL;
344	int i;
345	u8 dpll_mode = 0;
346
347	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
348	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
349	init = kzalloc(sizeof(*init), GFP_KERNEL);
350	if (!dd || !clk_hw || !init)
351		goto cleanup;
352
353	memcpy(dd, ddt, sizeof(*dd));
354
355	clk_hw->dpll_data = dd;
356	clk_hw->ops = &clkhwops_omap3_dpll;
357	clk_hw->hw.init = init;
358	clk_hw->flags = MEMMAP_ADDRESSING;
359
360	init->name = node->name;
361	init->ops = ops;
362
363	init->num_parents = of_clk_get_parent_count(node);
364	if (init->num_parents < 1) {
365		pr_err("%s must have parent(s)\n", node->name);
366		goto cleanup;
367	}
368
369	parent_names = kzalloc(sizeof(char *) * init->num_parents, GFP_KERNEL);
370	if (!parent_names)
371		goto cleanup;
372
373	for (i = 0; i < init->num_parents; i++)
374		parent_names[i] = of_clk_get_parent_name(node, i);
375
376	init->parent_names = parent_names;
377
378	dd->control_reg = ti_clk_get_reg_addr(node, 0);
379
380	/*
381	 * Special case for OMAP2 DPLL, register order is different due to
382	 * missing idlest_reg, also clkhwops is different. Detected from
383	 * missing idlest_mask.
384	 */
385	if (!dd->idlest_mask) {
386		dd->mult_div1_reg = ti_clk_get_reg_addr(node, 1);
387#ifdef CONFIG_ARCH_OMAP2
388		clk_hw->ops = &clkhwops_omap2xxx_dpll;
389		omap2xxx_clkt_dpllcore_init(&clk_hw->hw);
390#endif
391	} else {
392		dd->idlest_reg = ti_clk_get_reg_addr(node, 1);
393		if (IS_ERR(dd->idlest_reg))
394			goto cleanup;
395
396		dd->mult_div1_reg = ti_clk_get_reg_addr(node, 2);
397	}
398
399	if (IS_ERR(dd->control_reg) || IS_ERR(dd->mult_div1_reg))
400		goto cleanup;
401
402	if (dd->autoidle_mask) {
403		dd->autoidle_reg = ti_clk_get_reg_addr(node, 3);
404		if (IS_ERR(dd->autoidle_reg))
405			goto cleanup;
406	}
407
408	if (of_property_read_bool(node, "ti,low-power-stop"))
409		dpll_mode |= 1 << DPLL_LOW_POWER_STOP;
410
411	if (of_property_read_bool(node, "ti,low-power-bypass"))
412		dpll_mode |= 1 << DPLL_LOW_POWER_BYPASS;
413
414	if (of_property_read_bool(node, "ti,lock"))
415		dpll_mode |= 1 << DPLL_LOCKED;
416
417	if (dpll_mode)
418		dd->modes = dpll_mode;
419
420	_register_dpll(&clk_hw->hw, node);
421	return;
422
423cleanup:
424	kfree(dd);
425	kfree(parent_names);
426	kfree(init);
427	kfree(clk_hw);
428}
429
430#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
431	defined(CONFIG_SOC_DRA7XX)
/* OMAP4/5/DRA7 DPLL x2 output; uses the omap4 dpllmx hwops */
static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
{
	_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
}
CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
	       of_ti_omap4_dpll_x2_setup);
438#endif
439
440#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
/* AM33xx/AM43xx DPLL x2 output; no SoC-specific hwops needed */
static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
{
	_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
}
CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock",
	       of_ti_am3_dpll_x2_setup);
447#endif
448
449#ifdef CONFIG_ARCH_OMAP3
/* Standard OMAP3 noncore DPLL: 11-bit mult, 7-bit div, freqsel field */
static void __init of_ti_omap3_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf0,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
	       of_ti_omap3_dpll_setup);
469
/* OMAP3 core DPLL: mult/div fields sit higher in the register; no modes */
static void __init of_ti_omap3_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 16,
		.div1_mask = 0x7f << 8,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf0,
	};

	of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_core_dpll_clock, "ti,omap3-dpll-core-clock",
	       of_ti_omap3_core_dpll_setup);
488
/* OMAP3 PER DPLL (DPLL4): shifted idlest/enable/autoidle bit positions */
static void __init of_ti_omap3_per_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1 << 1,
		.enable_mask = 0x7 << 16,
		.autoidle_mask = 0x7 << 3,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf00000,
		.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_dpll_clock, "ti,omap3-dpll-per-clock",
	       of_ti_omap3_per_dpll_setup);
508
/* OMAP3 PER j-type DPLL: 12-bit mult plus sigma-delta divider / DCO fields */
static void __init of_ti_omap3_per_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1 << 1,
		.enable_mask = 0x7 << 16,
		.autoidle_mask = 0x7 << 3,
		.mult_mask = 0xfff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 4095,
		.max_divider = 128,
		.min_divider = 1,
		.sddiv_mask = 0xff << 24,
		.dco_mask = 0xe << 20,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_jtype_dpll_clock, "ti,omap3-dpll-per-j-type-clock",
	       of_ti_omap3_per_jtype_dpll_setup);
530#endif
531
/* Standard OMAP4-style DPLL template (no freqsel field) */
static void __init of_ti_omap4_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_dpll_clock, "ti,omap4-dpll-clock",
	       of_ti_omap4_dpll_setup);
550
/* OMAP5 MPU DPLL: adds duty-cycle-correction (DCC) for rates above 1.4GHz */
static void __init of_ti_omap5_mpu_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.dcc_mask = BIT(22),
		.dcc_rate = 1400000000, /* DCC beyond 1.4GHz */
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(of_ti_omap5_mpu_dpll_clock, "ti,omap5-mpu-dpll-clock",
	       of_ti_omap5_mpu_dpll_setup);
571
/* OMAP4 core DPLL: same field layout as the standard OMAP4 DPLL,
 * registered with the read-only core ops */
static void __init of_ti_omap4_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_core_dpll_clock, "ti,omap4-dpll-core-clock",
	       of_ti_omap4_core_dpll_setup);
590
591#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
592	defined(CONFIG_SOC_DRA7XX)
/* OMAP4 DPLL with REGM4XEN: extra m4xen and low-power-mode control bits */
static void __init of_ti_omap4_m4xen_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.m4xen_mask = 0x800,
		.lpmode_mask = 1 << 10,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_m4xen_dpll_clock, "ti,omap4-dpll-m4xen-clock",
	       of_ti_omap4_m4xen_dpll_setup);
613
/* OMAP4 j-type DPLL: 12-bit mult, 8-bit div, sigma-delta divider field */
static void __init of_ti_omap4_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0xfff << 8,
		.div1_mask = 0xff,
		.max_multiplier = 4095,
		.max_divider = 256,
		.min_divider = 1,
		.sddiv_mask = 0xff << 24,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_jtype_dpll_clock, "ti,omap4-dpll-j-type-clock",
	       of_ti_omap4_jtype_dpll_setup);
634#endif
635
/* AM33xx-style DPLL without gating support (no autoidle field either) */
static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_dpll_clock, "ti,am3-dpll-no-gate-clock",
	       of_ti_am3_no_gate_dpll_setup);
653
/* AM33xx j-type DPLL: wider mult/div limits, minimum divider of 2 */
static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 4095,
		.max_divider = 256,
		.min_divider = 2,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_jtype_dpll_clock, "ti,am3-dpll-j-type-clock",
	       of_ti_am3_jtype_dpll_setup);
672
/* AM33xx j-type DPLL without gating support */
static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_jtype_dpll_clock,
	       "ti,am3-dpll-no-gate-j-type-clock",
	       of_ti_am3_no_gate_jtype_dpll_setup);
692
/* Standard AM33xx DPLL (no autoidle field) */
static void __init of_ti_am3_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_dpll_clock, "ti,am3-dpll-clock", of_ti_am3_dpll_setup);
709
/* AM33xx core DPLL: same fields as the standard AM33xx DPLL,
 * registered with the read-only core ops */
static void __init of_ti_am3_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_core_dpll_clock, "ti,am3-dpll-core-clock",
	       of_ti_am3_core_dpll_setup);
727
/*
 * OMAP2 core DPLL. Deliberately leaves idlest_mask zero, which makes
 * of_ti_dpll_setup() take the OMAP2 special-case register/ops path.
 */
static void __init of_ti_omap2_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.enable_mask = 0x3,
		.mult_mask = 0x3ff << 12,
		.div1_mask = 0xf << 8,
		.max_divider = 16,
		.min_divider = 1,
	};

	of_ti_dpll_setup(node, &omap2_dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap2_core_dpll_clock, "ti,omap2-dpll-core-clock",
	       of_ti_omap2_core_dpll_setup);
742