/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * based on
 *
 * samsung/clk.c
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
#include "clk.h"

/**
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
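/*
 * Illustration only: in the per-SoC drivers a branch of the shape above is
 * normally described with the COMPOSITE() helper from clk.h and then handed
 * to rockchip_clk_register_branches() below. A hypothetical entry (the clock
 * id, name, parent array, register offsets, shifts and widths here are all
 * made up) could look like:
 *
 *	COMPOSITE(SCLK_FOO, "sclk_foo", mux_pll_src_p, 0,
 *			RK_CLKSEL_CON(1), 14, 2, MFLAGS, 8, 5, DFLAGS,
 *			RK_CLKGATE_CON(2), 9, GFLAGS),
 *
 * where MFLAGS/DFLAGS/GFLAGS are the mux/divider/gate flag shorthands
 * defined in the respective clk-rk*.c file.
 */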
static struct clk *rockchip_clk_register_branch(const char *name,
		const char **parent_names, u8 num_parents, void __iomem *base,
		int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
		u8 div_shift, u8 div_width, u8 div_flags,
		struct clk_div_table *div_table, int gate_offset,
		u8 gate_shift, u8 gate_flags, unsigned long flags,
		spinlock_t *lock)
{
	struct clk *clk;
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							: &clk_mux_ops;
	}

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			goto err_gate;

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div)
			goto err_div;

		div->flags = div_flags;
		div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div->table = div_table;
		div_ops = &clk_divider_ops;
	}

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     mux ? &mux->hw : NULL, mux_ops,
				     div ? &div->hw : NULL, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);

	return clk;

	/* don't leak the earlier allocations if a later one fails */
err_div:
	kfree(gate);
err_gate:
	kfree(mux);
	return ERR_PTR(-ENOMEM);
}

static struct clk *rockchip_clk_register_frac_branch(const char *name,
		const char **parent_names, u8 num_parents, void __iomem *base,
		int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, spinlock_t *lock)
{
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	/* check this first, so a failure doesn't leak the gate below */
	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			return ERR_PTR(-ENOMEM);

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div) {
		kfree(gate);
		return ERR_PTR(-ENOMEM);
	}

	/* numerator in the upper 16 bits, denominator in the lower 16 bits */
	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mmask = 0xffff0000;
	div->nshift = 0;
	div->nmask = 0xffff;
	div->lock = lock;
	div_ops = &clk_fractional_divider_ops;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &div->hw, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);

	return clk;
}

static DEFINE_SPINLOCK(clk_lock);
static struct clk **clk_table;
static void __iomem *reg_base;
static struct clk_onecell_data clk_data;
static struct device_node *cru_node;
static struct regmap *grf;

void __init rockchip_clk_init(struct device_node *np, void __iomem *base,
			      unsigned long nr_clks)
{
	reg_base = base;
	cru_node = np;
	grf = ERR_PTR(-EPROBE_DEFER);

	clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
	if (!clk_table)
		pr_err("%s: could not allocate clock lookup table\n", __func__);

	clk_data.clks = clk_table;
	clk_data.clk_num = nr_clks;
	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}

struct regmap *rockchip_clk_get_grf(void)
{
	if (IS_ERR(grf))
		grf = syscon_regmap_lookup_by_phandle(cru_node, "rockchip,grf");
	return grf;
}

void rockchip_clk_add_lookup(struct clk *clk, unsigned int id)
{
	if (clk_table && id)
		clk_table[id] = clk;
}

void __init rockchip_clk_register_plls(struct rockchip_pll_clock *list,
				unsigned int nr_pll, int grf_lock_offset)
{
	struct clk *clk;
	int idx;

	for (idx = 0; idx < nr_pll; idx++, list++) {
		clk = rockchip_clk_register_pll(list->type, list->name,
				list->parent_names, list->num_parents,
				reg_base, list->con_offset, grf_lock_offset,
				list->lock_shift, list->mode_offset,
				list->mode_shift, list->rate_table,
				list->pll_flags, &clk_lock);
		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}

		rockchip_clk_add_lookup(clk, list->id);
	}
}

void __init rockchip_clk_register_branches(
				      struct rockchip_clk_branch *list,
				      unsigned int nr_clk)
{
	struct clk *clk = NULL;
	unsigned int idx;
	unsigned long flags;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		flags = list->flags;

		/* catch simple muxes */
		switch (list->branch_type) {
		case branch_mux:
			clk = clk_register_mux(NULL, list->name,
				list->parent_names, list->num_parents,
				flags, reg_base + list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags, &clk_lock);
			break;
		case branch_divider:
			if (list->div_table)
				clk = clk_register_divider_table(NULL,
					list->name, list->parent_names[0],
					flags, reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, list->div_table,
					&clk_lock);
			else
				clk = clk_register_divider(NULL, list->name,
					list->parent_names[0], flags,
					reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, &clk_lock);
			break;
		case branch_fraction_divider:
			clk = rockchip_clk_register_frac_branch(list->name,
				list->parent_names, list->num_parents,
				reg_base, list->muxdiv_offset, list->div_flags,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &clk_lock);
			break;
		case branch_gate:
			flags |= CLK_SET_RATE_PARENT;

			clk = clk_register_gate(NULL, list->name,
				list->parent_names[0], flags,
				reg_base + list->gate_offset,
				list->gate_shift, list->gate_flags, &clk_lock);
			break;
		case branch_composite:
			clk = rockchip_clk_register_branch(list->name,
				list->parent_names, list->num_parents,
				reg_base, list->muxdiv_offset, list->mux_shift,
				list->mux_width, list->mux_flags,
				list->div_shift, list->div_width,
				list->div_flags, list->div_table,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &clk_lock);
			break;
		case branch_mmc:
			clk = rockchip_clk_register_mmc(
				list->name,
				list->parent_names, list->num_parents,
				reg_base + list->muxdiv_offset,
				list->div_shift
			);
			break;
		}

		/* none of the cases above matched */
		if (!clk) {
			pr_err("%s: unknown clock type %d\n",
			       __func__, list->branch_type);
			continue;
		}

		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s: %ld\n",
			       __func__, list->name, PTR_ERR(clk));
			continue;
		}

		rockchip_clk_add_lookup(clk, list->id);
	}
}

void __init rockchip_clk_register_armclk(unsigned int lookup_id,
			const char *name, const char **parent_names,
			u8 num_parents,
			const struct rockchip_cpuclk_reg_data *reg_data,
			const struct rockchip_cpuclk_rate_table *rates,
			int nrates)
{
	struct clk *clk;

	clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
					   reg_data, rates, nrates, reg_base,
					   &clk_lock);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock %s: %ld\n",
		       __func__, name, PTR_ERR(clk));
		return;
	}

	rockchip_clk_add_lookup(clk, lookup_id);
}

void __init rockchip_clk_protect_critical(const char *const clocks[],
					  int nclocks)
{
	int i;

	/* Protect the clocks that need to stay on */
	for (i = 0; i < nclocks; i++) {
		struct clk *clk = __clk_lookup(clocks[i]);

		if (clk)
			clk_prepare_enable(clk);
	}
}

static unsigned int reg_restart;
static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	/* magic value required to trigger the global soft-reset in the CRU */
	writel(0xfdb9, reg_base + reg_restart);
	return NOTIFY_DONE;
}

static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,
};

void __init rockchip_register_restart_notifier(unsigned int reg)
{
	int ret;

	reg_restart = reg;
	ret = register_restart_handler(&rockchip_restart_handler);
	if (ret)
		pr_err("%s: cannot register restart handler, %d\n",
		       __func__, ret);
}