/*
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#include "vfio_platform_private.h"

static DEFINE_MUTEX(driver_lock);

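/*
 * Count the device's resources via the bus-specific get_resource()
 * callback and describe each one as a VFIO region. MMIO regions are
 * always readable, writable unless the resource is read-only, and
 * mmap-capable only when they start and end on a page boundary.
 */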
static int vfio_platform_regions_init(struct vfio_platform_device *vdev)
{
	int cnt = 0, i;

	while (vdev->get_resource(vdev, cnt))
		cnt++;

	vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region),
				GFP_KERNEL);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct resource *res =
			vdev->get_resource(vdev, i);

		if (!res)
			goto err;

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].flags = 0;

		switch (resource_type(res)) {
		case IORESOURCE_MEM:
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO;
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
			if (!(res->flags & IORESOURCE_READONLY))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_WRITE;

			/*
			 * Only regions addressed with PAGE granularity may be
			 * MMAPed securely.
			 */
			if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
					!(vdev->regions[i].size & ~PAGE_MASK))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;

			break;
		case IORESOURCE_IO:
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_PIO;
			break;
		default:
			goto err;
		}
	}

	vdev->num_regions = cnt;

	return 0;
err:
	kfree(vdev->regions);
	return -EINVAL;
}

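/*
 * Unmap any MMIO regions that were lazily ioremap()ed on first access
 * and free the region array.
 */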
static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev)
{
	int i;

	for (i = 0; i < vdev->num_regions; i++)
		iounmap(vdev->regions[i].ioaddr);

	vdev->num_regions = 0;
	kfree(vdev->regions);
}

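/*
 * Called when a device fd is closed; region and IRQ state is torn down
 * once the open count drops to zero.
 */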
static void vfio_platform_release(void *device_data)
{
	struct vfio_platform_device *vdev = device_data;

	mutex_lock(&driver_lock);

	if (!(--vdev->refcnt)) {
		vfio_platform_regions_cleanup(vdev);
		vfio_platform_irq_cleanup(vdev);
	}

	mutex_unlock(&driver_lock);

	module_put(THIS_MODULE);
}

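/*
 * The first open of the device fd initializes region and IRQ
 * bookkeeping; subsequent opens only bump the reference count, all
 * under driver_lock.
 */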
static int vfio_platform_open(void *device_data)
{
	struct vfio_platform_device *vdev = device_data;
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&driver_lock);

	if (!vdev->refcnt) {
		ret = vfio_platform_regions_init(vdev);
		if (ret)
			goto err_reg;

		ret = vfio_platform_irq_init(vdev);
		if (ret)
			goto err_irq;
	}

	vdev->refcnt++;

	mutex_unlock(&driver_lock);
	return 0;

err_irq:
	vfio_platform_regions_cleanup(vdev);
err_reg:
	mutex_unlock(&driver_lock);
	module_put(THIS_MODULE);
	return ret;
}

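/*
 * Handle the standard VFIO device ioctls: device, region and IRQ info
 * queries, plus VFIO_DEVICE_SET_IRQS. Each handler validates the
 * user-supplied argsz against the fields it consumes before copying.
 */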
static long vfio_platform_ioctl(void *device_data,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = vdev->flags;
		info.num_regions = vdev->num_regions;
		info.num_irqs = vdev->num_irqs;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_regions)
			return -EINVAL;

		/* map offset to the physical address */
		info.offset = VFIO_PLATFORM_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_irqs)
			return -EINVAL;

		info.flags = vdev->irqs[info.index].flags;
		info.count = vdev->irqs[info.index].count;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		if (hdr.index >= vdev->num_irqs)
			return -EINVAL;

		if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
				  VFIO_IRQ_SET_ACTION_TYPE_MASK))
			return -EINVAL;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			size_t size;

			if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
				size = sizeof(uint8_t);
			else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
				size = sizeof(int32_t);
			else
				return -EINVAL;

			if (hdr.argsz - minsz < size)
				return -EINVAL;

			data = memdup_user((void __user *)(arg + minsz), size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_platform_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
						   hdr.start, hdr.count, data);
		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET)
		return -EINVAL; /* not implemented */

	return -ENOTTY;
}

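/*
 * Read from an MMIO region on behalf of userspace, using the widest
 * naturally aligned accesses possible (4, then 2, then 1 byte). The
 * region is ioremap()ed lazily on first access and stays mapped until
 * vfio_platform_regions_cleanup().
 */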
static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
				       char __user *buf, size_t count,
				       loff_t off)
{
	unsigned int done = 0;

	if (!reg->ioaddr) {
		/*
		 * Map through the pointer so the mapping is recorded in
		 * the region and can be iounmap()ed on cleanup; assigning
		 * to a by-value copy would leak it on every call.
		 */
		reg->ioaddr = ioremap_nocache(reg->addr, reg->size);
		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			val = ioread32(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 4))
				goto err;

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			val = ioread16(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 2))
				goto err;

			filled = 2;
		} else {
			u8 val;

			val = ioread8(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 1))
				goto err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}

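/*
 * Decode the region index and in-region offset from *ppos and dispatch
 * reads to the MMIO handler; PIO regions are not implemented yet.
 */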
static ssize_t vfio_platform_read(void *device_data, char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

	if (index >= vdev->num_regions)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ))
		return -EINVAL;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_read_mmio(&vdev->regions[index],
					       buf, count, off);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

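/*
 * Write to an MMIO region on behalf of userspace, mirroring the access
 * width selection of vfio_platform_read_mmio() above.
 */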
static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
					const char __user *buf, size_t count,
					loff_t off)
{
	unsigned int done = 0;

	if (!reg->ioaddr) {
		/* Same lazy mapping as the read path. */
		reg->ioaddr = ioremap_nocache(reg->addr, reg->size);
		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, 4))
				goto err;
			iowrite32(val, reg->ioaddr + off);

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, 2))
				goto err;
			iowrite16(val, reg->ioaddr + off);

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, 1))
				goto err;
			iowrite8(val, reg->ioaddr + off);

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}

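/*
 * Counterpart of vfio_platform_read(): decode *ppos and dispatch
 * writes to the MMIO handler; PIO regions are not implemented yet.
 */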
static ssize_t vfio_platform_write(void *device_data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

	if (index >= vdev->num_regions)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE))
		return -EINVAL;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_write_mmio(&vdev->regions[index],
						buf, count, off);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

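/*
 * Back an mmap() request with the region's physical pages. vm_pgoff
 * encodes both the region index and an in-region page offset; the
 * request is bounds-checked against the region size and the mapping
 * is made uncached.
 */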
static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
				   struct vm_area_struct *vma)
{
	u64 req_len, pgoff, req_start;

	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (region.size < PAGE_SIZE || req_start + req_len > region.size)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}

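/*
 * Validate an mmap() request against the target region: the mapping
 * must be shared, page-aligned, within a known region that allows
 * mmap, and must not ask for more access than the region grants.
 */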
static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= vdev->num_regions)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
			&& (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
			&& (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma->vm_private_data = vdev;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_mmap_mmio(vdev->regions[index], vma);

	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

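/* Device fd operations registered with the VFIO core at probe time. */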
static const struct vfio_device_ops vfio_platform_ops = {
	.name		= "vfio-platform",
	.open		= vfio_platform_open,
	.release	= vfio_platform_release,
	.ioctl		= vfio_platform_ioctl,
	.read		= vfio_platform_read,
	.write		= vfio_platform_write,
	.mmap		= vfio_platform_mmap,
};

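/*
 * Common probe path shared by the bus-specific drivers built on this
 * file: the device must already belong to an IOMMU group before it can
 * be registered with the VFIO core.
 */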
int vfio_platform_probe_common(struct vfio_platform_device *vdev,
			       struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (!vdev)
		return -EINVAL;

	group = iommu_group_get(dev);
	if (!group) {
		pr_err("VFIO: No IOMMU group for device %s\n", vdev->name);
		return -EINVAL;
	}

	ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev);
	if (ret) {
		iommu_group_put(group);
		return ret;
	}

	mutex_init(&vdev->igate);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_platform_probe_common);

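/*
 * Common remove path: unregister from the VFIO core and drop the IOMMU
 * group reference taken at probe time.
 */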
struct vfio_platform_device *vfio_platform_remove_common(struct device *dev)
{
	struct vfio_platform_device *vdev;

	vdev = vfio_del_group_dev(dev);
	if (vdev)
		iommu_group_put(dev->iommu_group);

	return vdev;
}
EXPORT_SYMBOL_GPL(vfio_platform_remove_common);