/*
 * drivers/mtd/devices/goldfish_nand.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/io.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/goldfish.h>
#include <asm/div64.h>

#include "goldfish_nand_reg.h"

struct goldfish_nand {
	/* lock protects access to the device registers */
	struct mutex            lock;
	unsigned char __iomem  *base;
	struct cmd_params       *cmd_params;
	size_t                  mtd_count;
	struct mtd_info         mtd[0];
};

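/*
 * Issue a command through the shared command-parameters block, if one was
 * set up by nand_setup_cmd_params().  The device index, address, transfer
 * size and data pointer are written into the block and a single
 * *_WITH_PARAMS command is posted, so only one register write is needed
 * per operation.  Returns 0 with the device result stored in *rv, or -1
 * (as u32) when the block is unavailable or the command has no
 * *_WITH_PARAMS variant.  Called with nand->lock held.
 */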
static u32 goldfish_nand_cmd_with_params(struct mtd_info *mtd,
					 enum nand_cmd cmd, u64 addr, u32 len,
					 void *ptr, u32 *rv)
{
	u32 cmdp;
	struct goldfish_nand *nand = mtd->priv;
	struct cmd_params *cps = nand->cmd_params;
	unsigned char __iomem  *base = nand->base;

	if (!cps)
		return -1;

	switch (cmd) {
	case NAND_CMD_ERASE:
		cmdp = NAND_CMD_ERASE_WITH_PARAMS;
		break;
	case NAND_CMD_READ:
		cmdp = NAND_CMD_READ_WITH_PARAMS;
		break;
	case NAND_CMD_WRITE:
		cmdp = NAND_CMD_WRITE_WITH_PARAMS;
		break;
	default:
		return -1;
	}
	cps->dev = mtd - nand->mtd;
	cps->addr_high = (u32)(addr >> 32);
	cps->addr_low = (u32)addr;
	cps->transfer_size = len;
	cps->data = (unsigned long)ptr;
	writel(cmdp, base + NAND_COMMAND);
	*rv = cps->result;
	return 0;
}

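/*
 * Run a NAND command against the emulated device.  The fast path goes
 * through the command-parameters block; otherwise the device index,
 * address, transfer size, data pointer and command are written to the
 * individual registers and the result is read back.  nand->lock
 * serialises all register access.  Returns the device's result value,
 * which the callers treat as the number of bytes transferred for read,
 * write and erase.
 */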
static u32 goldfish_nand_cmd(struct mtd_info *mtd, enum nand_cmd cmd,
			     u64 addr, u32 len, void *ptr)
{
	struct goldfish_nand *nand = mtd->priv;
	u32 rv;
	unsigned char __iomem  *base = nand->base;

	mutex_lock(&nand->lock);
	if (goldfish_nand_cmd_with_params(mtd, cmd, addr, len, ptr, &rv)) {
		writel(mtd - nand->mtd, base + NAND_DEV);
		writel((u32)(addr >> 32), base + NAND_ADDR_HIGH);
		writel((u32)addr, base + NAND_ADDR_LOW);
		writel(len, base + NAND_TRANSFER_SIZE);
		gf_write_ptr(ptr, base + NAND_DATA, base + NAND_DATA_HIGH);
		writel(cmd, base + NAND_COMMAND);
		rv = readl(base + NAND_RESULT);
	}
	mutex_unlock(&nand->lock);
	return rv;
}

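/*
 * Erase a range of the device.  MTD offsets and lengths count page data
 * only, so both are rescaled to raw device units that include the
 * out-of-band (spare) area before the erase command is issued.
 */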
static int goldfish_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	loff_t ofs = instr->addr;
	u32 len = instr->len;
	u32 rem;

	if (ofs + len > mtd->size)
		goto invalid_arg;
	rem = do_div(ofs, mtd->writesize);
	if (rem)
		goto invalid_arg;
	ofs *= (mtd->writesize + mtd->oobsize);

	if (len % mtd->writesize)
		goto invalid_arg;
	len = len / mtd->writesize * (mtd->writesize + mtd->oobsize);

	if (goldfish_nand_cmd(mtd, NAND_CMD_ERASE, ofs, len, NULL) != len) {
		pr_err("goldfish_nand_erase: erase failed, start %llx, len %x, dev_size %llx, erase_size %x\n",
		       ofs, len, mtd->size, mtd->erasesize);
		return -EIO;
	}

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;

invalid_arg:
	pr_err("goldfish_nand_erase: invalid erase, start %llx, len %x, dev_size %llx, erase_size %x\n",
	       ofs, len, mtd->size, mtd->erasesize);
	return -EINVAL;
}

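/*
 * Read page data and/or out-of-band data for a single page.  The data-only
 * offset is converted to a raw (page + spare) device offset; the OOB bytes
 * follow the page data at that raw offset.
 */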
static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs,
				  struct mtd_oob_ops *ops)
{
	u32 rem;

	if (ofs + ops->len > mtd->size)
		goto invalid_arg;
	if (ops->datbuf && ops->len && ops->len != mtd->writesize)
		goto invalid_arg;
	if (ops->ooblen + ops->ooboffs > mtd->oobsize)
		goto invalid_arg;

	rem = do_div(ofs, mtd->writesize);
	if (rem)
		goto invalid_arg;
	ofs *= (mtd->writesize + mtd->oobsize);

	if (ops->datbuf)
		ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs,
						ops->len, ops->datbuf);
	ofs += mtd->writesize + ops->ooboffs;
	if (ops->oobbuf)
		ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs,
						   ops->ooblen, ops->oobbuf);
	return 0;

invalid_arg:
	pr_err("goldfish_nand_read_oob: invalid read, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n",
	       ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize);
	return -EINVAL;
}

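/*
 * Write page data and/or out-of-band data for a single page, mirroring
 * goldfish_nand_read_oob(): the offset is rescaled to a raw device offset
 * and the OOB bytes are written immediately after the page data.
 */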
static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs,
				   struct mtd_oob_ops *ops)
{
	u32 rem;

	if (ofs + ops->len > mtd->size)
		goto invalid_arg;
	if (ops->len && ops->len != mtd->writesize)
		goto invalid_arg;
	if (ops->ooblen + ops->ooboffs > mtd->oobsize)
		goto invalid_arg;

	rem = do_div(ofs, mtd->writesize);
	if (rem)
		goto invalid_arg;
	ofs *= (mtd->writesize + mtd->oobsize);

	if (ops->datbuf)
		ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs,
						ops->len, ops->datbuf);
	ofs += mtd->writesize + ops->ooboffs;
	if (ops->oobbuf)
		ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs,
						   ops->ooblen, ops->oobbuf);
	return 0;

invalid_arg:
	pr_err("goldfish_nand_write_oob: invalid write, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n",
	       ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize);
	return -EINVAL;
}

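/*
 * Read page-aligned data.  The data-only offset is rescaled to a raw
 * device offset that accounts for the spare area interleaved with each
 * page before the read command is issued.
 */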
static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
			      size_t *retlen, u_char *buf)
{
	u32 rem;

	if (from + len > mtd->size)
		goto invalid_arg;

	rem = do_div(from, mtd->writesize);
	if (rem)
		goto invalid_arg;
	from *= (mtd->writesize + mtd->oobsize);

	*retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, from, len, buf);
	return 0;

invalid_arg:
	pr_err("goldfish_nand_read: invalid read, start %llx, len %zx, dev_size %llx, write_size %x\n",
	       from, len, mtd->size, mtd->writesize);
	return -EINVAL;
}

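/*
 * Write page-aligned data, using the same data-to-raw offset conversion
 * as goldfish_nand_read().
 */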
static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
			       size_t *retlen, const u_char *buf)
{
	u32 rem;

	if (to + len > mtd->size)
		goto invalid_arg;

	rem = do_div(to, mtd->writesize);
	if (rem)
		goto invalid_arg;
	to *= (mtd->writesize + mtd->oobsize);

	*retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, to, len, (void *)buf);
	return 0;

invalid_arg:
	pr_err("goldfish_nand_write: invalid write, start %llx, len %zx, dev_size %llx, write_size %x\n",
	       to, len, mtd->size, mtd->writesize);
	return -EINVAL;
}

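/*
 * Query the bad-block flag for the erase block containing @ofs.  The
 * offset must be erase-block aligned and is converted to a raw device
 * offset before the query; the device's result is returned directly.
 */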
static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	u32 rem;

	if (ofs >= mtd->size)
		goto invalid_arg;

	rem = do_div(ofs, mtd->erasesize);
	if (rem)
		goto invalid_arg;
	ofs *= mtd->erasesize / mtd->writesize;
	ofs *= (mtd->writesize + mtd->oobsize);

	return goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_GET, ofs, 0, NULL);

invalid_arg:
	pr_err("goldfish_nand_block_isbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
	       ofs, mtd->size, mtd->writesize);
	return -EINVAL;
}

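/*
 * Mark the erase block containing @ofs as bad.  Returns -EIO if the
 * device does not acknowledge the update with a result of 1.
 */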
static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	u32 rem;

	if (ofs >= mtd->size)
		goto invalid_arg;

	rem = do_div(ofs, mtd->erasesize);
	if (rem)
		goto invalid_arg;
	ofs *= mtd->erasesize / mtd->writesize;
	ofs *= (mtd->writesize + mtd->oobsize);

	if (goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_SET, ofs, 0, NULL) != 1)
		return -EIO;
	return 0;

invalid_arg:
	pr_err("goldfish_nand_block_markbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
	       ofs, mtd->size, mtd->writesize);
	return -EINVAL;
}

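/*
 * Allocate the shared command-parameters block and tell the device its
 * physical address, enabling the single-write *_WITH_PARAMS fast path in
 * goldfish_nand_cmd_with_params().
 */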
static int nand_setup_cmd_params(struct platform_device *pdev,
				 struct goldfish_nand *nand)
{
	u64 paddr;
	unsigned char __iomem  *base = nand->base;

	nand->cmd_params = devm_kzalloc(&pdev->dev,
					sizeof(struct cmd_params), GFP_KERNEL);
	if (!nand->cmd_params)
		return -1;

	paddr = __pa(nand->cmd_params);
	writel((u32)(paddr >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH);
	writel((u32)paddr, base + NAND_CMD_PARAMS_ADDR_LOW);
	return 0;
}

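/*
 * Initialise one backing store exposed by the emulator: read its geometry
 * and name from the device registers, convert the raw sizes (which include
 * the spare area) into the data-only sizes MTD expects, fill in the
 * mtd_info operations and register the device.
 */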
static int goldfish_nand_init_device(struct platform_device *pdev,
				     struct goldfish_nand *nand, int id)
{
	u32 name_len;
	u32 result;
	u32 flags;
	unsigned char __iomem  *base = nand->base;
	struct mtd_info *mtd = &nand->mtd[id];
	char *name;

	mutex_lock(&nand->lock);
	writel(id, base + NAND_DEV);
	flags = readl(base + NAND_DEV_FLAGS);
	name_len = readl(base + NAND_DEV_NAME_LEN);
	mtd->writesize = readl(base + NAND_DEV_PAGE_SIZE);
	mtd->size = readl(base + NAND_DEV_SIZE_LOW);
	mtd->size |= (u64)readl(base + NAND_DEV_SIZE_HIGH) << 32;
	mtd->oobsize = readl(base + NAND_DEV_EXTRA_SIZE);
	mtd->oobavail = mtd->oobsize;
	mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) /
			(mtd->writesize + mtd->oobsize) * mtd->writesize;
	do_div(mtd->size, mtd->writesize + mtd->oobsize);
	mtd->size *= mtd->writesize;
	dev_dbg(&pdev->dev,
		"goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n",
		id, mtd->size, mtd->writesize,
		mtd->oobsize, mtd->erasesize);
	mutex_unlock(&nand->lock);

	mtd->priv = nand;

	name = devm_kzalloc(&pdev->dev, name_len + 1, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	mtd->name = name;

	result = goldfish_nand_cmd(mtd, NAND_CMD_GET_DEV_NAME, 0, name_len,
				   name);
	if (result != name_len) {
		dev_err(&pdev->dev,
			"goldfish_nand_init_device failed to get dev name %d != %d\n",
			result, name_len);
		return -ENODEV;
	}
	((char *)mtd->name)[name_len] = '\0';

	/* Setup the MTD structure */
	mtd->type = MTD_NANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	if (flags & NAND_DEV_FLAG_READ_ONLY)
		mtd->flags &= ~MTD_WRITEABLE;
	if (flags & NAND_DEV_FLAG_CMD_PARAMS_CAP)
		nand_setup_cmd_params(pdev, nand);

	mtd->owner = THIS_MODULE;
	mtd->_erase = goldfish_nand_erase;
	mtd->_read = goldfish_nand_read;
	mtd->_write = goldfish_nand_write;
	mtd->_read_oob = goldfish_nand_read_oob;
	mtd->_write_oob = goldfish_nand_write_oob;
	mtd->_block_isbad = goldfish_nand_block_isbad;
	mtd->_block_markbad = goldfish_nand_block_markbad;

	if (mtd_device_register(mtd, NULL, 0))
		return -EIO;

	return 0;
}

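/*
 * Map the control registers, check the interface version, and initialise
 * one mtd_info per backing device reported by the emulator.  Probe
 * succeeds as long as at least one device initialises; all allocations
 * and the register mapping are devm-managed.
 */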
static int goldfish_nand_probe(struct platform_device *pdev)
{
	u32 num_dev;
	int i;
	int err;
	u32 num_dev_working;
	u32 version;
	struct resource *r;
	struct goldfish_nand *nand;
	unsigned char __iomem  *base;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENODEV;

	base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
	if (!base)
		return -ENOMEM;

	version = readl(base + NAND_VERSION);
	if (version != NAND_VERSION_CURRENT) {
		dev_err(&pdev->dev,
			"goldfish_nand_init: version mismatch, got %d, expected %d\n",
			version, NAND_VERSION_CURRENT);
		return -ENODEV;
	}
	num_dev = readl(base + NAND_NUM_DEV);
	if (num_dev == 0)
		return -ENODEV;

	nand = devm_kzalloc(&pdev->dev, sizeof(*nand) +
				sizeof(struct mtd_info) * num_dev, GFP_KERNEL);
	if (!nand)
		return -ENOMEM;

	mutex_init(&nand->lock);
	nand->base = base;
	nand->mtd_count = num_dev;
	platform_set_drvdata(pdev, nand);

	num_dev_working = 0;
	for (i = 0; i < num_dev; i++) {
		err = goldfish_nand_init_device(pdev, nand, i);
		if (err == 0)
			num_dev_working++;
	}
	if (num_dev_working == 0)
		return -ENODEV;
	return 0;
}

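/*
 * Tear down on driver removal: entries whose name was filled in by
 * goldfish_nand_init_device() are unregistered; everything else was
 * devm-allocated and is released automatically.
 */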
static int goldfish_nand_remove(struct platform_device *pdev)
{
	struct goldfish_nand *nand = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < nand->mtd_count; i++) {
		if (nand->mtd[i].name)
			mtd_device_unregister(&nand->mtd[i]);
	}
	return 0;
}

static struct platform_driver goldfish_nand_driver = {
	.probe		= goldfish_nand_probe,
	.remove		= goldfish_nand_remove,
	.driver = {
		.name = "goldfish_nand"
	}
};

module_platform_driver(goldfish_nand_driver);
MODULE_LICENSE("GPL");