1/*
2 * Copyright (c) 2013-2014, NVIDIA CORPORATION.  All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
15 *
16 * Based on drivers/misc/eeprom/sunxi_sid.c
17 */
18
19#include <linux/device.h>
20#include <linux/clk.h>
21#include <linux/completion.h>
22#include <linux/dmaengine.h>
23#include <linux/dma-mapping.h>
24#include <linux/err.h>
25#include <linux/io.h>
26#include <linux/kernel.h>
27#include <linux/kobject.h>
28#include <linux/of_device.h>
29#include <linux/platform_device.h>
30#include <linux/random.h>
31
32#include <soc/tegra/fuse.h>
33
34#include "fuse.h"
35
/* Fuse registers start at this offset within the APB aperture. */
#define FUSE_BEGIN	0x100
/* Total size of the readable fuse region exposed via sysfs. */
#define FUSE_SIZE	0x1f8
/* Offsets (relative to FUSE_BEGIN) of the 64-bit unique chip ID. */
#define FUSE_UID_LOW	0x08
#define FUSE_UID_HIGH	0x0c

static phys_addr_t fuse_phys;		/* bus address of the fuse block (DMA source) */
static struct clk *fuse_clk;		/* gated around each DMA read */
static void __iomem __initdata *fuse_base;	/* early-boot MMIO mapping only */

/* On Tegra20 the fuse block cannot be read via the CPU; reads go through
 * the APB DMA engine instead.  The state below serializes those reads. */
static DEFINE_MUTEX(apb_dma_lock);
static DECLARE_COMPLETION(apb_dma_wait);
static struct dma_chan *apb_dma_chan;
static struct dma_slave_config dma_sconfig;
static u32 *apb_buffer;			/* single-word coherent bounce buffer */
static dma_addr_t apb_buffer_phys;
51
/* DMA completion callback: wake the reader blocked in tegra20_fuse_readl(). */
static void apb_dma_complete(void *args)
{
	complete(&apb_dma_wait);
}
56
57static u32 tegra20_fuse_readl(const unsigned int offset)
58{
59	int ret;
60	u32 val = 0;
61	struct dma_async_tx_descriptor *dma_desc;
62
63	mutex_lock(&apb_dma_lock);
64
65	dma_sconfig.src_addr = fuse_phys + FUSE_BEGIN + offset;
66	ret = dmaengine_slave_config(apb_dma_chan, &dma_sconfig);
67	if (ret)
68		goto out;
69
70	dma_desc = dmaengine_prep_slave_single(apb_dma_chan, apb_buffer_phys,
71			sizeof(u32), DMA_DEV_TO_MEM,
72			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
73	if (!dma_desc)
74		goto out;
75
76	dma_desc->callback = apb_dma_complete;
77	dma_desc->callback_param = NULL;
78
79	reinit_completion(&apb_dma_wait);
80
81	clk_prepare_enable(fuse_clk);
82
83	dmaengine_submit(dma_desc);
84	dma_async_issue_pending(apb_dma_chan);
85	ret = wait_for_completion_timeout(&apb_dma_wait, msecs_to_jiffies(50));
86
87	if (WARN(ret == 0, "apb read dma timed out"))
88		dmaengine_terminate_all(apb_dma_chan);
89	else
90		val = *apb_buffer;
91
92	clk_disable_unprepare(fuse_clk);
93out:
94	mutex_unlock(&apb_dma_lock);
95
96	return val;
97}
98
/* Device-tree match table; terminated by the empty sentinel entry. */
static const struct of_device_id tegra20_fuse_of_match[] = {
	{ .compatible = "nvidia,tegra20-efuse" },
	{},
};
103
104static int apb_dma_init(void)
105{
106	dma_cap_mask_t mask;
107
108	dma_cap_zero(mask);
109	dma_cap_set(DMA_SLAVE, mask);
110	apb_dma_chan = dma_request_channel(mask, NULL, NULL);
111	if (!apb_dma_chan)
112		return -EPROBE_DEFER;
113
114	apb_buffer = dma_alloc_coherent(NULL, sizeof(u32), &apb_buffer_phys,
115					GFP_KERNEL);
116	if (!apb_buffer) {
117		dma_release_channel(apb_dma_chan);
118		return -ENOMEM;
119	}
120
121	dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
122	dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
123	dma_sconfig.src_maxburst = 1;
124	dma_sconfig.dst_maxburst = 1;
125
126	return 0;
127}
128
129static int tegra20_fuse_probe(struct platform_device *pdev)
130{
131	struct resource *res;
132	int err;
133
134	fuse_clk = devm_clk_get(&pdev->dev, NULL);
135	if (IS_ERR(fuse_clk)) {
136		dev_err(&pdev->dev, "missing clock");
137		return PTR_ERR(fuse_clk);
138	}
139
140	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
141	if (!res)
142		return -EINVAL;
143	fuse_phys = res->start;
144
145	err = apb_dma_init();
146	if (err)
147		return err;
148
149	if (tegra_fuse_create_sysfs(&pdev->dev, FUSE_SIZE, tegra20_fuse_readl))
150		return -ENODEV;
151
152	dev_dbg(&pdev->dev, "loaded\n");
153
154	return 0;
155}
156
/* Built-in driver (registered from postcore_initcall, no remove path). */
static struct platform_driver tegra20_fuse_driver = {
	.probe = tegra20_fuse_probe,
	.driver = {
		.name = "tegra20_fuse",
		.of_match_table = tegra20_fuse_of_match,
	}
};
164
/*
 * Register the driver early (postcore) so fuse values are available to
 * other drivers as soon as possible during boot.
 */
static int __init tegra20_fuse_init(void)
{
	return platform_driver_register(&tegra20_fuse_driver);
}
postcore_initcall(tegra20_fuse_init);
170
171/* Early boot code. This code is called before the devices are created */
172
/*
 * Early-boot fuse read via the temporary MMIO mapping set up by
 * tegra20_init_fuse_early().  @offset is relative to FUSE_BEGIN.
 * Only valid between the ioremap() and iounmap() in that function.
 */
u32 __init tegra20_fuse_early(const unsigned int offset)
{
	return readl_relaxed(fuse_base + FUSE_BEGIN + offset);
}
177
178bool __init tegra20_spare_fuse_early(int spare_bit)
179{
180	u32 offset = spare_bit * 4;
181	bool value;
182
183	value = tegra20_fuse_early(offset + 0x100);
184
185	return value;
186}
187
188static void __init tegra20_fuse_add_randomness(void)
189{
190	u32 randomness[7];
191
192	randomness[0] = tegra_sku_info.sku_id;
193	randomness[1] = tegra_read_straps();
194	randomness[2] = tegra_read_chipid();
195	randomness[3] = tegra_sku_info.cpu_process_id << 16;
196	randomness[3] |= tegra_sku_info.core_process_id;
197	randomness[4] = tegra_sku_info.cpu_speedo_id << 16;
198	randomness[4] |= tegra_sku_info.soc_speedo_id;
199	randomness[5] = tegra20_fuse_early(FUSE_UID_LOW);
200	randomness[6] = tegra20_fuse_early(FUSE_UID_HIGH);
201
202	add_device_randomness(randomness, sizeof(randomness));
203}
204
205void __init tegra20_init_fuse_early(void)
206{
207	fuse_base = ioremap(TEGRA_FUSE_BASE, TEGRA_FUSE_SIZE);
208
209	tegra_init_revision();
210	tegra20_init_speedo_data(&tegra_sku_info);
211	tegra20_fuse_add_randomness();
212
213	iounmap(fuse_base);
214}
215