/*
 * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 * Implementation of a generic nvm manager for Open-Channel SSDs.
 */

#include "gennvm.h"

static void gennvm_blocks_free(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	int i;

	gennvm_for_each_lun(gn, lun, i) {
		if (!lun->vlun.blocks)
			break;
		vfree(lun->vlun.blocks);
	}
}

static void gennvm_luns_free(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;

	kfree(gn->luns);
}

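/*
 * Set up the flat LUN array. LUNs are indexed globally as
 * id = chnl_id * luns_per_chnl + lun_id, so the per-channel lun_id and
 * the chnl_id are recovered from the global index by modulo and
 * division below.
 */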
static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
	struct gen_lun *lun;
	int i;

	gn->luns = kcalloc(dev->nr_luns, sizeof(struct gen_lun), GFP_KERNEL);
	if (!gn->luns)
		return -ENOMEM;

	gennvm_for_each_lun(gn, lun, i) {
		spin_lock_init(&lun->vlun.lock);
		INIT_LIST_HEAD(&lun->free_list);
		INIT_LIST_HEAD(&lun->used_list);
		INIT_LIST_HEAD(&lun->bb_list);

		lun->reserved_blocks = 2; /* for GC only */
		lun->vlun.id = i;
		lun->vlun.lun_id = i % dev->luns_per_chnl;
		lun->vlun.chnl_id = i / dev->luns_per_chnl;
		lun->vlun.nr_free_blocks = dev->blks_per_lun;
		lun->vlun.nr_inuse_blocks = 0;
		lun->vlun.nr_bad_blocks = 0;
	}
	return 0;
}

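/*
 * Callback for the device's get_bb_tbl: @blks holds one byte per block
 * in the LUN addressed by @ppa, and a non-zero byte marks the block
 * bad. Factory bad blocks are retired to the bb_list before any
 * allocation takes place.
 */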
static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
								void *private)
{
	struct gen_nvm *gn = private;
	struct nvm_dev *dev = gn->dev;
	struct gen_lun *lun;
	struct nvm_block *blk;
	int i;

	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];

	/* &lun->vlun.blocks[i] can never be NULL, so checking the
	 * resulting pointer caught nothing; bound-check the table size
	 * instead, which is what the check intended.
	 */
	if (nr_blocks > dev->blks_per_lun) {
		pr_err("gennvm: BB data is out of bounds.\n");
		return -EINVAL;
	}

	for (i = 0; i < nr_blocks; i++) {
		if (blks[i] == 0)
			continue;

		blk = &lun->vlun.blocks[i];
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		/* keep the counter in sync with the list the block left */
		lun->vlun.nr_free_blocks--;
	}

	return 0;
}

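/*
 * Callback for the device's get_l2p_tbl: walk the logical-to-physical
 * table and mark every block that backs at least one mapped sector as
 * in-use, so the free lists reflect the state left behind by a
 * previous run.
 */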
static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct nvm_dev *dev = private;
	struct gen_nvm *gn = dev->mp;
	sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
	u64 elba = slba + nlb;
	struct gen_lun *lun;
	struct nvm_block *blk;
	u64 i;
	int lun_id;

	if (unlikely(elba > dev->total_pages)) {
		pr_err("gennvm: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);

		if (unlikely(pba >= max_pages && pba != U64_MAX)) {
			pr_err("gennvm: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected. It often holds internal device boot
		 * information. U64_MAX is let through by the bounds check
		 * above but cannot resolve to a block either, so skip it as
		 * an unmapped entry.
		 */
		if (!pba || pba == U64_MAX)
			continue;

		/* resolve block from physical address */
		lun_id = div_u64(pba, dev->sec_per_lun);
		lun = &gn->luns[lun_id];

		/* Calculate block offset into lun */
		pba = pba - (dev->sec_per_lun * lun_id);
		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];

		if (!blk->type) {
			/* At this point, we don't know anything about the
			 * block. It's up to the FTL on top to re-establish
			 * the block state.
			 */
			list_move_tail(&blk->list, &lun->used_list);
			blk->type = 1; /* in use */
			lun->vlun.nr_free_blocks--;
			lun->vlun.nr_inuse_blocks++;
		}
	}

	return 0;
}

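/*
 * Allocate the per-LUN block arrays and seed the free lists, then
 * overlay factory bad-block data and, when the device exposes it, the
 * block state implied by its L2P table.
 */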
static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
	struct gen_lun *lun;
	struct nvm_block *block;
	sector_t lun_iter, blk_iter, cur_block_id = 0;
	int ret;

	gennvm_for_each_lun(gn, lun, lun_iter) {
		lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
							dev->blks_per_lun);
		if (!lun->vlun.blocks)
			return -ENOMEM;

		for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
			block = &lun->vlun.blocks[blk_iter];

			INIT_LIST_HEAD(&block->list);

			block->lun = &lun->vlun;
			block->id = cur_block_id++;

			/* First block is reserved for device */
			if (unlikely(lun_iter == 0 && blk_iter == 0)) {
				lun->vlun.nr_free_blocks--;
				continue;
			}

			list_add_tail(&block->list, &lun->free_list);
		}

		if (dev->ops->get_bb_tbl) {
			struct ppa_addr ppa;

			ppa.ppa = 0;
			ppa.g.ch = lun->vlun.chnl_id;
			/* use the in-channel LUN index; vlun.id is the
			 * global index and overflows the lun field on
			 * multi-channel devices.
			 */
			ppa.g.lun = lun->vlun.lun_id;
			ppa = generic_to_dev_addr(dev, ppa);

			ret = dev->ops->get_bb_tbl(dev, ppa,
						dev->blks_per_lun,
						gennvm_block_bb, gn);
			if (ret)
				pr_err("gennvm: could not read BB table\n");
		}
	}

	if (dev->ops->get_l2p_tbl) {
		ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
							gennvm_block_map, dev);
		if (ret) {
			pr_err("gennvm: could not read L2P table.\n");
			pr_warn("gennvm: falling back to default block initialization\n");
		}
	}

	return 0;
}

static void gennvm_free(struct nvm_dev *dev)
{
	gennvm_blocks_free(dev);
	gennvm_luns_free(dev);
	kfree(dev->mp);
	dev->mp = NULL;
}

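/*
 * Media manager registration entry point. Returns 1 when the manager
 * takes ownership of @dev and a negative errno on failure; the module
 * reference taken here pins gennvm for as long as the device is
 * managed.
 */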
static int gennvm_register(struct nvm_dev *dev)
{
	struct gen_nvm *gn;
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
	if (!gn) {
		/* drop the reference taken above */
		module_put(THIS_MODULE);
		return -ENOMEM;
	}

	gn->dev = dev;
	gn->nr_luns = dev->nr_luns;
	dev->mp = gn;

	ret = gennvm_luns_init(dev, gn);
	if (ret) {
		pr_err("gennvm: could not initialize luns\n");
		goto err;
	}

	ret = gennvm_blocks_init(dev, gn);
	if (ret) {
		pr_err("gennvm: could not initialize blocks\n");
		goto err;
	}

	return 1;
err:
	gennvm_free(dev);
	module_put(THIS_MODULE);
	return ret;
}

static void gennvm_unregister(struct nvm_dev *dev)
{
	gennvm_free(dev);
	module_put(THIS_MODULE);
}

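/*
 * Hand out a free block from @vlun. The final reserved_blocks free
 * blocks are held back for garbage collection: only callers passing
 * NVM_IOTYPE_GC in @flags may dip into that reserve, so GC can always
 * claim a block to move valid data into.
 */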
static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
				struct nvm_lun *vlun, unsigned long flags)
{
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
	struct nvm_block *blk = NULL;
	int is_gc = flags & NVM_IOTYPE_GC;

	spin_lock(&vlun->lock);

	if (list_empty(&lun->free_list)) {
		pr_err_ratelimited("gennvm: lun %u has no free blocks available\n",
								lun->vlun.id);
		goto out;
	}

	if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks)
		goto out;

	blk = list_first_entry(&lun->free_list, struct nvm_block, list);
	list_move_tail(&blk->list, &lun->used_list);
	blk->type = 1; /* in use */

	lun->vlun.nr_free_blocks--;
	lun->vlun.nr_inuse_blocks++;

out:
	spin_unlock(&vlun->lock);
	return blk;
}

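/*
 * Return a block to its LUN. blk->type encodes the lifecycle state:
 * 0 = free, 1 = in use, 2 = bad. In-use blocks go back on the free
 * list; blocks marked bad (see gennvm_mark_blk_bad) are retired to the
 * bb_list instead.
 */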
static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	struct nvm_lun *vlun = blk->lun;
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);

	spin_lock(&vlun->lock);

	switch (blk->type) {
	case 1: /* in use: return to the free list */
		list_move_tail(&blk->list, &lun->free_list);
		lun->vlun.nr_free_blocks++;
		lun->vlun.nr_inuse_blocks--;
		blk->type = 0;
		break;
	case 2: /* marked bad: retire to the bad block list */
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		lun->vlun.nr_inuse_blocks--;
		break;
	default:
		WARN_ON_ONCE(1);
		pr_err("gennvm: erroneous block type (%lu -> %u)\n",
							blk->id, blk->type);
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		lun->vlun.nr_inuse_blocks--;
	}

	spin_unlock(&vlun->lock);
}

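/*
 * Physical addresses travel in two formats: the generic media manager
 * layout (the ppa_addr.g bitfields: ch/lun/blk/pl/pg/sec) and the
 * device's native layout. Requests are built in generic mode,
 * converted just before they reach the device, and converted back on
 * the error path.
 */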
static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_pages > 1) {
		for (i = 0; i < rqd->nr_pages; i++)
			rqd->ppa_list[i] = dev_to_generic_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
	}
}

static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_pages > 1) {
		for (i = 0; i < rqd->nr_pages; i++)
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
	}
}

static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	/* returning 0 here would drop the request without ever
	 * completing it; fail it instead.
	 */
	if (!dev->ops->submit_io)
		return -ENODEV;

	/* Convert address space */
	gennvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	return dev->ops->submit_io(dev, rqd);
}

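/*
 * Tag the block addressed by @ppa with a new type. @ppa must be in
 * generic mode; the LUN sits at ch * luns_per_chnl + lun in the flat
 * array, mirroring the lookup in gennvm_block_bb.
 */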
static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
								int type)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	struct nvm_block *blk;

	if (unlikely(ppa->g.ch >= dev->nr_chnls ||
					ppa->g.lun >= dev->luns_per_chnl ||
					ppa->g.blk >= dev->blks_per_lun)) {
		WARN_ON_ONCE(1);
		pr_err("gennvm: ppa broken (ch %u >= %u, lun %u >= %u, blk %u >= %u)\n",
				ppa->g.ch, dev->nr_chnls,
				ppa->g.lun, dev->luns_per_chnl,
				ppa->g.blk, dev->blks_per_lun);
		return;
	}

	lun = &gn->luns[(dev->luns_per_chnl * ppa->g.ch) + ppa->g.lun];
	blk = &lun->vlun.blocks[ppa->g.blk];

	/* will be moved to bb list on put_blk from target */
	blk->type = type;
}

/* Mark a block bad. It is expected that the target recovers from the error. */
static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (!dev->ops->set_bb_tbl)
		return;

	if (dev->ops->set_bb_tbl(dev, rqd, 1))
		return;

	gennvm_addr_to_generic_mode(dev, rqd);

	/* look up blocks and mark them as bad (type 2) */
	if (rqd->nr_pages > 1)
		for (i = 0; i < rqd->nr_pages; i++)
			gennvm_blk_set_type(dev, &rqd->ppa_list[i], 2);
	else
		gennvm_blk_set_type(dev, &rqd->ppa_addr, 2);
}

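/*
 * Completion hook: intercept per-request status before handing the
 * request to the target's own end_io. Failed writes mark the affected
 * block(s) bad so the target can rewrite the data elsewhere.
 */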
static int gennvm_end_io(struct nvm_rq *rqd, int error)
{
	struct nvm_tgt_instance *ins = rqd->ins;
	int ret = 0;

	switch (error) {
	case NVM_RSP_SUCCESS:
	case NVM_RSP_ERR_EMPTYPAGE:
		break;
	case NVM_RSP_ERR_FAILWRITE:
		gennvm_mark_blk_bad(rqd->dev, rqd);
		/* fall through: a failed write still counts as an error */
	default:
		ret++;
	}

	ret += ins->tt->end_io(rqd, error);

	return ret;
}

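/*
 * Erase a block. On multi-plane devices the erase must cover every
 * plane, so a ppa list with one entry per plane is built; single-plane
 * devices erase through the plain ppa_addr instead.
 */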
static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
							unsigned long flags)
{
	int plane_cnt = 0, pl_idx, ret;
	struct ppa_addr addr;
	struct nvm_rq rqd;

	if (!dev->ops->erase_block)
		return 0;

	addr = block_to_ppa(dev, blk);

	if (dev->plane_mode == NVM_PLANE_SINGLE) {
		rqd.nr_pages = 1;
		rqd.ppa_addr = addr;
	} else {
		/* one ppa per plane, all addressing the same block */
		plane_cnt = (1 << dev->plane_mode);
		rqd.nr_pages = plane_cnt;

		rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
							&rqd.dma_ppa_list);
		if (!rqd.ppa_list) {
			pr_err("gennvm: failed to allocate dma memory\n");
			return -ENOMEM;
		}

		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			addr.g.pl = pl_idx;
			rqd.ppa_list[pl_idx] = addr;
		}
	}

	gennvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->erase_block(dev, &rqd);

	/* the ppa list is only allocated in the multi-plane case */
	if (plane_cnt)
		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);

	return ret;
}

static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
{
	struct gen_nvm *gn = dev->mp;

	return &gn->luns[lunid].vlun;
}

static void gennvm_lun_info_print(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	unsigned int i;

	gennvm_for_each_lun(gn, lun, i) {
		spin_lock(&lun->vlun.lock);

		pr_info("%s: lun%8u\t%u\t%u\t%u\n",
				dev->name, i,
				lun->vlun.nr_free_blocks,
				lun->vlun.nr_inuse_blocks,
				lun->vlun.nr_bad_blocks);

		spin_unlock(&lun->vlun.lock);
	}
}

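/*
 * Ops table exposed to the lightnvm core. The core dispatches through
 * these hooks once the manager claims a device; a minimal caller
 * sketch, assuming the core stores this table in a dev->mt pointer at
 * registration time:
 *
 *	struct nvm_block *blk = dev->mt->get_blk(dev, vlun, 0);
 *	if (blk) {
 *		...
 *		dev->mt->put_blk(dev, blk);
 *	}
 */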
static struct nvmm_type gennvm = {
	.name		= "gennvm",
	.version	= {0, 1, 0},

	.register_mgr	= gennvm_register,
	.unregister_mgr	= gennvm_unregister,

	.get_blk	= gennvm_get_blk,
	.put_blk	= gennvm_put_blk,

	.submit_io	= gennvm_submit_io,
	.end_io		= gennvm_end_io,
	.erase_blk	= gennvm_erase_blk,

	.get_lun	= gennvm_get_lun,
	.lun_info_print = gennvm_lun_info_print,
};

static int __init gennvm_module_init(void)
{
	return nvm_register_mgr(&gennvm);
}

static void __exit gennvm_module_exit(void)
{
	nvm_unregister_mgr(&gennvm);
}

module_init(gennvm_module_init);
module_exit(gennvm_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic media manager for Open-Channel SSDs");