1/*
2 * PCI Backend - Provides a Virtual PCI bus (with real devices)
3 *               to the frontend
4 *
5 *   Author: Ryan Wilson <hap9@epoch.ncsc.mil>
6 */
7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10#include <linux/list.h>
11#include <linux/slab.h>
12#include <linux/pci.h>
13#include <linux/mutex.h>
14#include "pciback.h"
15
16#define PCI_SLOT_MAX 32
17
/* Per-backend-device state: the layout of the virtual PCI bus. */
struct vpci_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list[PCI_SLOT_MAX];	/* one function list per virtual slot */
	struct mutex lock;				/* serializes add/release/lookup */
};
23
24static inline struct list_head *list_first(struct list_head *head)
25{
26	return head->next;
27}
28
29static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
30					       unsigned int domain,
31					       unsigned int bus,
32					       unsigned int devfn)
33{
34	struct pci_dev_entry *entry;
35	struct pci_dev *dev = NULL;
36	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
37
38	if (domain != 0 || bus != 0)
39		return NULL;
40
41	if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
42		mutex_lock(&vpci_dev->lock);
43
44		list_for_each_entry(entry,
45				    &vpci_dev->dev_list[PCI_SLOT(devfn)],
46				    list) {
47			if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
48				dev = entry->dev;
49				break;
50			}
51		}
52
53		mutex_unlock(&vpci_dev->lock);
54	}
55	return dev;
56}
57
58static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
59{
60	if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
61	    && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
62		return 1;
63
64	return 0;
65}
66
/*
 * Add a real PCI device to the virtual bus and publish it to the frontend.
 *
 * Non-VF functions of the same physical slot are kept together in one
 * virtual slot; everything else (including virtual functions) goes into the
 * first free slot.  Returns 0 on success or a negative errno; on failure the
 * entry is freed here, on success ownership passes to the dev_list.
 */
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev, int devid,
				   publish_pci_dev_cb publish_cb)
{
	int err = 0, slot, func = -1;
	struct pci_dev_entry *t, *dev_entry;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	/* Bridges cannot be passed through on the flat virtual bus. */
	if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
		err = -EFAULT;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Can't export bridges on the virtual PCI bus");
		goto out;
	}

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry) {
		err = -ENOMEM;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error adding entry to virtual PCI bus");
		goto out;
	}

	dev_entry->dev = dev;

	mutex_lock(&vpci_dev->lock);

	/*
	 * Keep multi-function devices together on the virtual PCI bus, except
	 * virtual functions.
	 */
	if (!dev->is_virtfn) {
		for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
			if (list_empty(&vpci_dev->dev_list[slot]))
				continue;

			/* Compare against the slot's first (representative) entry. */
			t = list_entry(list_first(&vpci_dev->dev_list[slot]),
				       struct pci_dev_entry, list);

			if (match_slot(dev, t->dev)) {
				pr_info("vpci: %s: assign to virtual slot %d func %d\n",
					pci_name(dev), slot,
					PCI_FUNC(dev->devfn));
				list_add_tail(&dev_entry->list,
					      &vpci_dev->dev_list[slot]);
				func = PCI_FUNC(dev->devfn);
				goto unlock;
			}
		}
	}

	/* Assign to a new slot on the virtual PCI bus */
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		if (list_empty(&vpci_dev->dev_list[slot])) {
			pr_info("vpci: %s: assign to virtual slot %d\n",
				pci_name(dev), slot);
			list_add_tail(&dev_entry->list,
				      &vpci_dev->dev_list[slot]);
			/* A VF always becomes function 0 of its own slot. */
			func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
			goto unlock;
		}
	}

	/* All PCI_SLOT_MAX slots are occupied. */
	err = -ENOMEM;
	xenbus_dev_fatal(pdev->xdev, err,
			 "No more space on root virtual PCI bus");

unlock:
	mutex_unlock(&vpci_dev->lock);

	/* Publish this device. */
	if (!err)
		err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
	else
		kfree(dev_entry);	/* not on any list if err is set */

out:
	return err;
}
146
147static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
148					struct pci_dev *dev, bool lock)
149{
150	int slot;
151	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
152	struct pci_dev *found_dev = NULL;
153
154	mutex_lock(&vpci_dev->lock);
155
156	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
157		struct pci_dev_entry *e;
158
159		list_for_each_entry(e, &vpci_dev->dev_list[slot], list) {
160			if (e->dev == dev) {
161				list_del(&e->list);
162				found_dev = e->dev;
163				kfree(e);
164				goto out;
165			}
166		}
167	}
168
169out:
170	mutex_unlock(&vpci_dev->lock);
171
172	if (found_dev) {
173		if (lock)
174			device_lock(&found_dev->dev);
175		pcistub_put_pci_dev(found_dev);
176		if (lock)
177			device_unlock(&found_dev->dev);
178	}
179}
180
181static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
182{
183	int slot;
184	struct vpci_dev_data *vpci_dev;
185
186	vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
187	if (!vpci_dev)
188		return -ENOMEM;
189
190	mutex_init(&vpci_dev->lock);
191
192	for (slot = 0; slot < PCI_SLOT_MAX; slot++)
193		INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);
194
195	pdev->pci_dev_data = vpci_dev;
196
197	return 0;
198}
199
200static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
201					 publish_pci_root_cb publish_cb)
202{
203	/* The Virtual PCI bus has only one root */
204	return publish_cb(pdev, 0, 0);
205}
206
207static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
208{
209	int slot;
210	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
211
212	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
213		struct pci_dev_entry *e, *tmp;
214		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
215					 list) {
216			struct pci_dev *dev = e->dev;
217			list_del(&e->list);
218			device_lock(&dev->dev);
219			pcistub_put_pci_dev(dev);
220			device_unlock(&dev->dev);
221			kfree(e);
222		}
223	}
224
225	kfree(vpci_dev);
226	pdev->pci_dev_data = NULL;
227}
228
229static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
230					struct xen_pcibk_device *pdev,
231					unsigned int *domain, unsigned int *bus,
232					unsigned int *devfn)
233{
234	struct pci_dev_entry *entry;
235	struct pci_dev *dev = NULL;
236	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
237	int found = 0, slot;
238
239	mutex_lock(&vpci_dev->lock);
240	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
241		list_for_each_entry(entry,
242			    &vpci_dev->dev_list[slot],
243			    list) {
244			dev = entry->dev;
245			if (dev && dev->bus->number == pcidev->bus->number
246				&& pci_domain_nr(dev->bus) ==
247					pci_domain_nr(pcidev->bus)
248				&& dev->devfn == pcidev->devfn) {
249				found = 1;
250				*domain = 0;
251				*bus = 0;
252				*devfn = PCI_DEVFN(slot,
253					 PCI_FUNC(pcidev->devfn));
254			}
255		}
256	}
257	mutex_unlock(&vpci_dev->lock);
258	return found;
259}
260
/*
 * The "vpci" backend: real devices are re-enumerated onto a single flat
 * virtual bus (domain 0, bus 0), packed into virtual slots.
 */
const struct xen_pcibk_backend xen_pcibk_vpci_backend = {
	.name		= "vpci",
	.init		= __xen_pcibk_init_devices,
	.free		= __xen_pcibk_release_devices,
	.find		= __xen_pcibk_get_pcifront_dev,
	.publish	= __xen_pcibk_publish_pci_roots,
	.release	= __xen_pcibk_release_pci_dev,
	.add		= __xen_pcibk_add_pci_dev,
	.get		= __xen_pcibk_get_pci_dev,
};
271