/*
 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * Derived from ca91c042.c by Michael Wyrick
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/vme.h>

#include "../vme_bridge.h"
#include "vme_ca91cx42.h"

static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
static void ca91cx42_remove(struct pci_dev *);

/* Module parameters */
static int geoid;

static const char driver_name[] = "vme_ca91cx42";

static const struct pci_device_id ca91cx42_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
	{ },
};

static struct pci_driver ca91cx42_driver = {
	.name = driver_name,
	.id_table = ca91cx42_ids,
	.probe = ca91cx42_probe,
	.remove = ca91cx42_remove,
};

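/*
 * Each interrupt sub-handler below returns the LINT_STAT bits it serviced;
 * ca91cx42_irqhandler() ORs these together and writes the result back to
 * LINT_STAT to acknowledge precisely the interrupts that were handled.
 */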
static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
{
	wake_up(&bridge->dma_queue);

	return CA91CX42_LINT_DMA;
}

static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
{
	int i;
	u32 serviced = 0;

	for (i = 0; i < 4; i++) {
		if (stat & CA91CX42_LINT_LM[i]) {
			/* We only enable interrupts if the callback is set */
			bridge->lm_callback[i](i);
			serviced |= CA91CX42_LINT_LM[i];
		}
	}

	return serviced;
}

/* XXX This needs to be split into 4 queues */
static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
{
	wake_up(&bridge->mbox_queue);

	return CA91CX42_LINT_MBOX;
}

static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
{
	wake_up(&bridge->iack_queue);

	return CA91CX42_LINT_SW_IACK;
}

static u32 ca91cx42_VERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
{
	int val;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	val = ioread32(bridge->base + DGCS);

	/* An error occurred if the transfer is not flagged as done */
	if (!(val & CA91CX42_DGCS_DONE))
		dev_err(ca91cx42_bridge->parent,
			"VME bus error during DMA transfer, DGCS=%08X\n", val);

	return CA91CX42_LINT_VERR;
}

static u32 ca91cx42_LERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
{
	int val;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	val = ioread32(bridge->base + DGCS);

	/* An error occurred if the transfer is not flagged as done */
	if (!(val & CA91CX42_DGCS_DONE))
		dev_err(ca91cx42_bridge->parent,
			"PCI bus error during DMA transfer, DGCS=%08X\n", val);

	return CA91CX42_LINT_LERR;
}

static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
	int stat)
{
	int vec, i, serviced = 0;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	for (i = 7; i > 0; i--) {
		if (stat & (1 << i)) {
			vec = ioread32(bridge->base +
				CA91CX42_V_STATID[i]) & 0xff;

			vme_irq_handler(ca91cx42_bridge, i, vec);

			serviced |= (1 << i);
		}
	}

	return serviced;
}

static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
{
	u32 stat, enable, serviced = 0;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = ptr;

	bridge = ca91cx42_bridge->driver_priv;

	enable = ioread32(bridge->base + LINT_EN);
	stat = ioread32(bridge->base + LINT_STAT);

	/* Only look at unmasked interrupts */
	stat &= enable;

	if (unlikely(!stat))
		return IRQ_NONE;

	if (stat & CA91CX42_LINT_DMA)
		serviced |= ca91cx42_DMA_irqhandler(bridge);
	if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3))
		serviced |= ca91cx42_LM_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_MBOX)
		serviced |= ca91cx42_MB_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_SW_IACK)
		serviced |= ca91cx42_IACK_irqhandler(bridge);
	if (stat & CA91CX42_LINT_VERR)
		serviced |= ca91cx42_VERR_irqhandler(ca91cx42_bridge);
	if (stat & CA91CX42_LINT_LERR)
		serviced |= ca91cx42_LERR_irqhandler(ca91cx42_bridge);
	if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
			CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
			CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
			CA91CX42_LINT_VIRQ7))
		serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);

	/* Clear serviced interrupts */
	iowrite32(serviced, bridge->base + LINT_STAT);

	return IRQ_HANDLED;
}

static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
{
	int result, tmp;
	struct pci_dev *pdev;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Need pdev */
	pdev = to_pci_dev(ca91cx42_bridge->parent);

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&ca91cx42_bridge->vme_errors);

	mutex_init(&ca91cx42_bridge->irq_mtx);

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
			driver_name, ca91cx42_bridge);
	if (result) {
		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
		       pdev->irq);
		return result;
	}

	/* Ensure all interrupts are mapped to PCI Interrupt 0 */
	iowrite32(0, bridge->base + LINT_MAP0);
	iowrite32(0, bridge->base + LINT_MAP1);
	iowrite32(0, bridge->base + LINT_MAP2);

	/* Enable DMA, mailbox & LM Interrupts */
	tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
		CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
		CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

	iowrite32(tmp, bridge->base + LINT_EN);

	return 0;
}

static void ca91cx42_irq_exit(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	struct ca91cx42_driver *bridge = ca91cx42_bridge->driver_priv;

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	/* Must match the dev_id passed to request_irq() in irq_init() */
	free_irq(pdev->irq, ca91cx42_bridge);
}

static int ca91cx42_iack_received(struct ca91cx42_driver *bridge, int level)
{
	u32 tmp;

	tmp = ioread32(bridge->base + LINT_STAT);

	if (tmp & (1 << level))
		return 0;
	else
		return 1;
}

/*
 * Set up a VME interrupt
 */
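/*
 * @state selects whether VME IRQ @level is unmasked (non-zero) or masked
 * (zero) in LINT_EN. When masking with @sync set, synchronize_irq() is used
 * to wait out any handler still running for the bridge's PCI interrupt.
 */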
static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level,
	int state, int sync)
{
	struct pci_dev *pdev;
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Enable IRQ level */
	tmp = ioread32(bridge->base + LINT_EN);

	if (state == 0)
		tmp &= ~CA91CX42_LINT_VIRQ[level];
	else
		tmp |= CA91CX42_LINT_VIRQ[level];

	iowrite32(tmp, bridge->base + LINT_EN);

	if ((state == 0) && (sync != 0)) {
		pdev = to_pci_dev(ca91cx42_bridge->parent);

		synchronize_irq(pdev->irq);
	}
}

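/*
 * A minimal consumer sketch (hypothetical vme_dev, assuming the vme core API
 * of this kernel generation): only even Status/ID vectors are accepted, as
 * checked below.
 *
 *	err = vme_irq_generate(vdev, 3, 0x42);	(OK)
 *	err = vme_irq_generate(vdev, 3, 0x43);	(-EINVAL: odd vector)
 */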
static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
	int statid)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Universe can only generate even vectors */
	if (statid & 1)
		return -EINVAL;

	mutex_lock(&bridge->vme_int);

	tmp = ioread32(bridge->base + VINT_EN);

	/* Set Status/ID */
	iowrite32(statid << 24, bridge->base + STATID);

	/* Assert VMEbus IRQ */
	tmp = tmp | (1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	/* Wait for IACK */
	wait_event_interruptible(bridge->iack_queue,
				 ca91cx42_iack_received(bridge, level));

	/* Return interrupt to low state */
	tmp = ioread32(bridge->base + VINT_EN);
	tmp = tmp & ~(1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	mutex_unlock(&bridge->vme_int);

	return 0;
}

static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, u32 aspace, u32 cycle)
{
	unsigned int i, addr = 0, granularity;
	unsigned int temp_ctl = 0;
	unsigned int vme_bound, pci_offset;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = image->parent;

	bridge = ca91cx42_bridge->driver_priv;

	i = image->number;

	switch (aspace) {
	case VME_A16:
		addr |= CA91CX42_VSI_CTL_VAS_A16;
		break;
	case VME_A24:
		addr |= CA91CX42_VSI_CTL_VAS_A24;
		break;
	case VME_A32:
		addr |= CA91CX42_VSI_CTL_VAS_A32;
		break;
	case VME_USER1:
		addr |= CA91CX42_VSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		addr |= CA91CX42_VSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_CRCSR:
	case VME_USER3:
	case VME_USER4:
	default:
		dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
		return -EINVAL;
	}

	/*
	 * The bound address is the first address above the window and the
	 * translation offset is added to incoming VME addresses, so both
	 * derive directly from the requested base, size and PCI base.
	 */
	vme_bound = vme_base + size;
	pci_offset = pci_base - vme_base;

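	/* Slave images 0 and 4 have a 4 KB granularity, the others 64 KB */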
	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	if (vme_base & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent,
			"Invalid VME base alignment\n");
		return -EINVAL;
	}
	if (vme_bound & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent,
			"Invalid VME bound alignment\n");
		return -EINVAL;
	}
	if (pci_offset & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent,
			"Invalid PCI Offset alignment\n");
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
	temp_ctl &= ~CA91CX42_VSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	/* Setup mapping */
	iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
	iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
	iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);

	/* Setup address space */
	temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
	temp_ctl |= addr;

	/* Setup cycle types */
	temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_VSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	return 0;
}

static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
{
	unsigned int i, granularity = 0, ctl = 0;
	unsigned long long vme_bound, pci_offset;
	struct ca91cx42_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Read Registers */
	ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);

	*vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
	vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
	pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);

	*pci_base = (dma_addr_t)*vme_base + pci_offset;
	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;

	if (ctl & CA91CX42_VSI_CTL_EN)
		*enabled = 1;

	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
		*aspace = VME_A16;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
		*aspace = VME_A24;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
		*aspace = VME_A32;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
		*aspace = VME_USER1;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
		*aspace = VME_USER2;

	if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
		*cycle |= VME_USER;
	if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
		*cycle |= VME_DATA;

	return 0;
}

/*
 * Allocate and map PCI Resource
 */
static int ca91cx42_alloc_resource(struct vme_master_resource *image,
	unsigned long long size)
{
	unsigned long long existing_size;
	int retval = 0;
	struct pci_dev *pdev;
	struct vme_bridge *ca91cx42_bridge;

	ca91cx42_bridge = image->parent;

	/* Find pci_dev container of dev */
	if (ca91cx42_bridge->parent == NULL) {
		pr_err("%s: Dev entry NULL\n", driver_name);
		return -EINVAL;
	}
	pdev = to_pci_dev(ca91cx42_bridge->parent);

	existing_size = (unsigned long long)(image->bus_resource.end -
		image->bus_resource.start);

	/* If the existing size is OK, return */
	if (existing_size == (size - 1))
		return 0;

	if (existing_size != 0) {
		iounmap(image->kern_base);
		image->kern_base = NULL;
		kfree(image->bus_resource.name);
		release_resource(&image->bus_resource);
		memset(&image->bus_resource, 0, sizeof(struct resource));
	}

	if (image->bus_resource.name == NULL) {
		image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
		if (image->bus_resource.name == NULL) {
			dev_err(ca91cx42_bridge->parent,
				"Unable to allocate memory for resource name\n");
			retval = -ENOMEM;
			goto err_name;
		}
	}

	sprintf((char *)image->bus_resource.name, "%s.%d",
		ca91cx42_bridge->name, image->number);

	image->bus_resource.start = 0;
	image->bus_resource.end = (unsigned long)size;
	image->bus_resource.flags = IORESOURCE_MEM;

	retval = pci_bus_alloc_resource(pdev->bus,
		&image->bus_resource, size, size, PCIBIOS_MIN_MEM,
		0, NULL, NULL);
	if (retval) {
		dev_err(ca91cx42_bridge->parent,
			"Failed to allocate mem resource for window %d size 0x%lx start 0x%lx\n",
			image->number, (unsigned long)size,
			(unsigned long)image->bus_resource.start);
		goto err_resource;
	}

	image->kern_base = ioremap_nocache(
		image->bus_resource.start, size);
	if (image->kern_base == NULL) {
		dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
		retval = -ENOMEM;
		goto err_remap;
	}

	return 0;

err_remap:
	release_resource(&image->bus_resource);
err_resource:
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(struct resource));
err_name:
	return retval;
}

/*
 * Free and unmap PCI Resource
 */
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&image->bus_resource);
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(struct resource));
}

static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size, u32 aspace,
	u32 cycle, u32 dwidth)
{
	int retval = 0;
	unsigned int i, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned long long pci_bound, vme_offset, pci_base;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = image->parent;

	bridge = ca91cx42_bridge->driver_priv;

	i = image->number;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Verify input data */
	if (vme_base & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent,
			"Invalid VME Window alignment\n");
		retval = -EINVAL;
		goto err_window;
	}
	if (size & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent,
			"Invalid VME Window size alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&image->lock);

	/*
	 * Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus dependent stuff up the stack
	 */
	retval = ca91cx42_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent,
			"Unable to allocate PCI resource for window\n");
		goto err_res;
	}

	pci_base = (unsigned long long)image->bus_resource.start;

	/*
	 * The bound address is the first PCI address above the window; the
	 * translation offset is added to outgoing PCI addresses to form the
	 * VME address.
	 */
	pci_bound = pci_base + size;
	vme_offset = vme_base - pci_base;

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
	temp_ctl &= ~CA91CX42_LSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	/* Setup cycle types */
	temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
	if (cycle & VME_BLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;

	/* Setup data width */
	temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
	switch (dwidth) {
	case VME_D8:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
		break;
	case VME_D16:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
		break;
	case VME_D32:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
		break;
	case VME_D64:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
		break;
	default:
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent, "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
	}

	/* Setup address space */
	temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
		break;
	case VME_A24:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
		break;
	case VME_A32:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
		break;
	case VME_CRCSR:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_USER3:
	case VME_USER4:
	default:
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;

	/* Setup mapping */
	iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
	iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
	iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_LSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	spin_unlock(&image->lock);
	return 0;

err_aspace:
err_dwidth:
	ca91cx42_free_resource(image);
err_res:
err_window:
	return retval;
}

static int __ca91cx42_master_get(struct vme_master_resource *image,
	int *enabled, unsigned long long *vme_base, unsigned long long *size,
	u32 *aspace, u32 *cycle, u32 *dwidth)
{
	unsigned int i, ctl;
	unsigned long long pci_base, pci_bound, vme_offset;
	struct ca91cx42_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);

	pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
	vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
	pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);

	*vme_base = pci_base + vme_offset;
	*size = (unsigned long long)(pci_bound - pci_base);

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;
	*dwidth = 0;

	if (ctl & CA91CX42_LSI_CTL_EN)
		*enabled = 1;

	/* Decode address space */
	switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
	case CA91CX42_LSI_CTL_VAS_A16:
		*aspace = VME_A16;
		break;
	case CA91CX42_LSI_CTL_VAS_A24:
		*aspace = VME_A24;
		break;
	case CA91CX42_LSI_CTL_VAS_A32:
		*aspace = VME_A32;
		break;
	case CA91CX42_LSI_CTL_VAS_CRCSR:
		*aspace = VME_CRCSR;
		break;
	case CA91CX42_LSI_CTL_VAS_USER1:
		*aspace = VME_USER1;
		break;
	case CA91CX42_LSI_CTL_VAS_USER2:
		*aspace = VME_USER2;
		break;
	}

	/* XXX Not sure how to check for MBLT */
	/* Decode cycle types */
	if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
		*cycle |= VME_BLT;
	else
		*cycle |= VME_SCT;

	if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	else
		*cycle |= VME_USER;

	if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	else
		*cycle |= VME_DATA;

	/* Decode data width */
	switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
	case CA91CX42_LSI_CTL_VDW_D8:
		*dwidth = VME_D8;
		break;
	case CA91CX42_LSI_CTL_VDW_D16:
		*dwidth = VME_D16;
		break;
	case CA91CX42_LSI_CTL_VDW_D32:
		*dwidth = VME_D32;
		break;
	case CA91CX42_LSI_CTL_VDW_D64:
		*dwidth = VME_D64;
		break;
	}

	return 0;
}

static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
	u32 *cycle, u32 *dwidth)
{
	int retval;

	spin_lock(&image->lock);

	retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);

	spin_unlock(&image->lock);

	return retval;
}

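/*
 * The transfer routines below are reached through the vme core. A minimal
 * consumer sketch (hypothetical vme_dev and buffer, assuming the vme core
 * API of this kernel generation):
 *
 *	res = vme_master_request(vdev, VME_A24, VME_SCT, VME_D16);
 *	rc = vme_master_set(res, 1, 0x10000, 0x10000, VME_A24, VME_SCT,
 *		VME_D16);
 *	rc = vme_master_read(res, buf, 0x100, 0);
 */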
static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
	void *buf, size_t count, loff_t offset)
{
	ssize_t retval;
	void __iomem *addr = image->kern_base + offset;
	unsigned int done = 0;
	unsigned int count32;

	if (count == 0)
		return 0;

	spin_lock(&image->lock);

	/* The following code handles VME address alignment. We cannot use
	 * memcpy_xxx here because it may cut data transfers into 8-bit
	 * cycles when D16 or D32 cycles are required on the VME bus.
	 * On the other hand, the bridge itself assures that the maximum data
	 * cycle configured for the transfer is used and splits it
	 * automatically for non-aligned addresses, so we don't want the
	 * overhead of needlessly forcing small transfers for the entire cycle.
	 */
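	/*
	 * For example, with addr % 4 == 1 and count == 7 the sequence below
	 * issues one 8-bit read, one 16-bit read and then a single 32-bit
	 * read, leaving no unaligned tail.
	 */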
	if ((uintptr_t)addr & 0x1) {
		*(u8 *)buf = ioread8(addr);
		done += 1;
		if (done == count)
			goto out;
	}
	if ((uintptr_t)(addr + done) & 0x2) {
		if ((count - done) < 2) {
			*(u8 *)(buf + done) = ioread8(addr + done);
			done += 1;
			goto out;
		} else {
			*(u16 *)(buf + done) = ioread16(addr + done);
			done += 2;
		}
	}

	count32 = (count - done) & ~0x3;
	while (done < count32) {
		*(u32 *)(buf + done) = ioread32(addr + done);
		done += 4;
	}

	if ((count - done) & 0x2) {
		*(u16 *)(buf + done) = ioread16(addr + done);
		done += 2;
	}
	if ((count - done) & 0x1) {
		*(u8 *)(buf + done) = ioread8(addr + done);
		done += 1;
	}
out:
	retval = count;
	spin_unlock(&image->lock);

	return retval;
}

static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
	void *buf, size_t count, loff_t offset)
{
	ssize_t retval;
	void __iomem *addr = image->kern_base + offset;
	unsigned int done = 0;
	unsigned int count32;

	if (count == 0)
		return 0;

	spin_lock(&image->lock);

	/* Here we apply the same strategy as in master_read to ensure the
	 * correct cycle sizes on the VME bus.
	 */
	if ((uintptr_t)addr & 0x1) {
		iowrite8(*(u8 *)buf, addr);
		done += 1;
		if (done == count)
			goto out;
	}
	if ((uintptr_t)(addr + done) & 0x2) {
		if ((count - done) < 2) {
			iowrite8(*(u8 *)(buf + done), addr + done);
			done += 1;
			goto out;
		} else {
			iowrite16(*(u16 *)(buf + done), addr + done);
			done += 2;
		}
	}

	count32 = (count - done) & ~0x3;
	while (done < count32) {
		iowrite32(*(u32 *)(buf + done), addr + done);
		done += 4;
	}

	if ((count - done) & 0x2) {
		iowrite16(*(u16 *)(buf + done), addr + done);
		done += 2;
	}
	if ((count - done) & 0x1) {
		iowrite8(*(u8 *)(buf + done), addr + done);
		done += 1;
	}
out:
	retval = count;

	spin_unlock(&image->lock);

	return retval;
}

static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
	unsigned int mask, unsigned int compare, unsigned int swap,
	loff_t offset)
{
	u32 result;
	uintptr_t pci_addr;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = image->parent->driver_priv;
	dev = image->parent->parent;

	/* Locking as we can only do one of these at a time */
	mutex_lock(&bridge->vme_rmw);

	/* Lock image */
	spin_lock(&image->lock);

	/* Find the PCI address that maps to the desired VME address */
	pci_addr = (uintptr_t)image->kern_base + offset;

	/* Address must be 4-byte aligned */
	if (pci_addr & 0x3) {
		dev_err(dev, "RMW Address not 4-byte aligned\n");
		result = -EINVAL;
		goto out;
	}

	/* Ensure RMW Disabled whilst configuring */
	iowrite32(0, bridge->base + SCYC_CTL);

	/* Configure registers */
	iowrite32(mask, bridge->base + SCYC_EN);
	iowrite32(compare, bridge->base + SCYC_CMP);
	iowrite32(swap, bridge->base + SCYC_SWP);
	iowrite32(pci_addr, bridge->base + SCYC_ADDR);

	/* Enable RMW */
	iowrite32(CA91CX42_SCYC_CTL_CYC_RMW, bridge->base + SCYC_CTL);

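	/*
	 * With the cycle generator armed, the read below is executed on the
	 * VME bus as an atomic read-modify-write: SCYC_EN selects the bits
	 * that take part in the compare (SCYC_CMP) and swap (SCYC_SWP), and
	 * the pre-modification data is returned to the caller (see the
	 * Universe II manual for the exact compare/swap semantics).
	 */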
	/* Kick process off with a read to the required address. */
	result = ioread32(image->kern_base + offset);

	/* Disable RMW */
	iowrite32(0, bridge->base + SCYC_CTL);

out:
	spin_unlock(&image->lock);

	mutex_unlock(&bridge->vme_rmw);

	return result;
}

static int ca91cx42_dma_list_add(struct vme_dma_list *list,
	struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
{
	struct ca91cx42_dma_entry *entry, *prev;
	struct vme_dma_pci *pci_attr;
	struct vme_dma_vme *vme_attr;
	dma_addr_t desc_ptr;
	int retval = 0;
	struct device *dev;

	dev = list->parent->parent->parent;

	/* XXX descriptor must be aligned on 64-bit boundaries */
	entry = kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
	if (entry == NULL) {
		dev_err(dev,
			"Failed to allocate memory for dma resource structure\n");
		retval = -ENOMEM;
		goto err_mem;
	}

	/* Test descriptor alignment */
	if ((unsigned long)&entry->descriptor & CA91CX42_DCPP_M) {
		dev_err(dev,
			"Descriptor not aligned to 16 byte boundary as required: %p\n",
			&entry->descriptor);
		retval = -EINVAL;
		goto err_align;
	}

	memset(&entry->descriptor, 0, sizeof(struct ca91cx42_dma_descriptor));

	if (dest->type == VME_DMA_VME) {
		entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
		vme_attr = dest->private;
		pci_attr = src->private;
	} else {
		vme_attr = src->private;
		pci_attr = dest->private;
	}

	/* Check we can fulfill the required attributes */
	if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
		VME_USER2)) != 0) {
		dev_err(dev, "Unsupported address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
		VME_PROG | VME_DATA)) != 0) {
		dev_err(dev, "Unsupported cycle type\n");
		retval = -EINVAL;
		goto err_cycle;
	}

	/* Check to see if we can fulfill source and destination */
	if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
		((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {
		dev_err(dev,
			"Cannot perform transfer with this source-destination combination\n");
		retval = -EINVAL;
		goto err_direct;
	}

	/* Setup cycle types */
	if (vme_attr->cycle & VME_BLT)
		entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT;

	/* Setup data width */
	switch (vme_attr->dwidth) {
	case VME_D8:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8;
		break;
	case VME_D16:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16;
		break;
	case VME_D32:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32;
		break;
	case VME_D64:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
		break;
	default:
		dev_err(dev, "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;	/* don't leak the entry */
	}

	/* Setup address space */
	switch (vme_attr->aspace) {
	case VME_A16:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16;
		break;
	case VME_A24:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24;
		break;
	case VME_A32:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32;
		break;
	case VME_USER1:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1;
		break;
	case VME_USER2:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
		break;
	default:
		dev_err(dev, "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;	/* don't leak the entry */
	}

	if (vme_attr->cycle & VME_SUPER)
		entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR;
	if (vme_attr->cycle & VME_PROG)
		entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM;

	entry->descriptor.dtbc = count;
	entry->descriptor.dla = pci_attr->address;
	entry->descriptor.dva = vme_attr->address;
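	/*
	 * CA91CX42_DCPP_NULL marks this descriptor as the end of the chain;
	 * when a further entry is appended, the linking code below rewrites
	 * this field with the bus address of the next descriptor.
	 */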
	entry->descriptor.dcpp = CA91CX42_DCPP_NULL;

	/* Add to list */
	list_add_tail(&entry->list, &list->entries);

	/* Fill out the previous descriptor's "Next Address" */
	if (entry->list.prev != &list->entries) {
		prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
			list);
		/* We need the bus address for the pointer */
		desc_ptr = virt_to_bus(&entry->descriptor);
		prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
	}

	return 0;

err_cycle:
err_aspace:
err_direct:
err_dwidth:
err_align:
	kfree(entry);
err_mem:
	return retval;
}

/* Return 1 when the DMA engine is idle: used as a wait_event() condition */
static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	tmp = ioread32(bridge->base + DGCS);

	if (tmp & CA91CX42_DGCS_ACT)
		return 0;
	else
		return 1;
}

static int ca91cx42_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_dma_resource *ctrlr;
	struct ca91cx42_dma_entry *entry;
	int retval = 0;
	dma_addr_t bus_addr;
	u32 val;
	struct device *dev;
	struct ca91cx42_driver *bridge;

	ctrlr = list->parent;

	bridge = ctrlr->parent->driver_priv;
	dev = ctrlr->parent->parent;

	mutex_lock(&ctrlr->mtx);

	if (!list_empty(&ctrlr->running)) {
		/*
		 * XXX We have an active DMA transfer and currently haven't
		 *     sorted out the mechanism for "pending" DMA transfers.
		 *     Return busy.
		 */
		/* Need to add to pending here */
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	} else {
		list_add(&list->list, &ctrlr->running);
	}

	/* Get first bus address and write into registers */
	entry = list_first_entry(&list->entries, struct ca91cx42_dma_entry,
		list);

	bus_addr = virt_to_bus(&entry->descriptor);

	mutex_unlock(&ctrlr->mtx);

	iowrite32(0, bridge->base + DTBC);
	iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);

	/* Start the operation */
	val = ioread32(bridge->base + DGCS);

	/* XXX Could set VMEbus On and Off Counters here */
	val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M);

	val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT |
		CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
		CA91CX42_DGCS_PERR);

	iowrite32(val, bridge->base + DGCS);

	val |= CA91CX42_DGCS_GO;

	iowrite32(val, bridge->base + DGCS);

	wait_event_interruptible(bridge->dma_queue,
		ca91cx42_dma_busy(ctrlr->parent));

	/*
	 * Read status register, this register is valid until we kick off a
	 * new transfer.
	 */
	val = ioread32(bridge->base + DGCS);

	if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
		CA91CX42_DGCS_PERR)) {
		dev_err(dev, "ca91c042: DMA Error. DGCS=%08X\n", val);
		val = ioread32(bridge->base + DCTL);
		retval = -EIO;
	}

	/* Remove list from running list */
	mutex_lock(&ctrlr->mtx);
	list_del(&list->list);
	mutex_unlock(&ctrlr->mtx);

	return retval;
}

static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
{
	struct list_head *pos, *temp;
	struct ca91cx42_dma_entry *entry;

	/* detach and free each entry */
	list_for_each_safe(pos, temp, &list->entries) {
		list_del(pos);
		entry = list_entry(pos, struct ca91cx42_dma_entry, list);
		kfree(entry);
	}

	return 0;
}

/*
 * All 4 location monitors reside at the same base - this is therefore a
 * system wide configuration.
 *
 * This does not enable the location monitor - that should be done when the
 * first callback is attached and disabled when the last callback is removed.
 */
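/*
 * A minimal consumer sketch (hypothetical callback, assuming the vme core
 * API of this kernel generation, where callbacks receive the monitor index):
 *
 *	static void lm_handler(int monitor)
 *	{
 *		pr_info("location monitor %d triggered\n", monitor);
 *	}
 *
 *	res = vme_lm_request(vdev);
 *	rc = vme_lm_set(res, 0x60000, VME_A24, VME_USER | VME_DATA);
 *	rc = vme_lm_attach(res, 0, lm_handler);
 */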
static int ca91cx42_lm_set(struct vme_lm_resource *lm,
	unsigned long long lm_base, u32 aspace, u32 cycle)
{
	u32 temp_base, lm_ctl = 0;
	int i;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = lm->parent->driver_priv;
	dev = lm->parent->parent;

	/* Check the alignment of the location monitor */
	temp_base = (u32)lm_base;
	if (temp_base & 0xffff) {
		dev_err(dev,
			"Location monitor must be aligned to 64KB boundary\n");
		return -EINVAL;
	}

	mutex_lock(&lm->mtx);

	/* If we already have a callback attached, we can't move it! */
	for (i = 0; i < lm->monitors; i++) {
		if (bridge->lm_callback[i] != NULL) {
			mutex_unlock(&lm->mtx);
			dev_err(dev,
				"Location monitor callback attached, can't reset\n");
			return -EBUSY;
		}
	}

	switch (aspace) {
	case VME_A16:
		lm_ctl |= CA91CX42_LM_CTL_AS_A16;
		break;
	case VME_A24:
		lm_ctl |= CA91CX42_LM_CTL_AS_A24;
		break;
	case VME_A32:
		lm_ctl |= CA91CX42_LM_CTL_AS_A32;
		break;
	default:
		mutex_unlock(&lm->mtx);
		dev_err(dev, "Invalid address space\n");
		return -EINVAL;
	}

	if (cycle & VME_SUPER)
		lm_ctl |= CA91CX42_LM_CTL_SUPR;
	if (cycle & VME_USER)
		lm_ctl |= CA91CX42_LM_CTL_NPRIV;
	if (cycle & VME_PROG)
		lm_ctl |= CA91CX42_LM_CTL_PGM;
	if (cycle & VME_DATA)
		lm_ctl |= CA91CX42_LM_CTL_DATA;

	iowrite32(lm_base, bridge->base + LM_BS);
	iowrite32(lm_ctl, bridge->base + LM_CTL);

	mutex_unlock(&lm->mtx);

	return 0;
}

/* Get configuration of the location monitor and return whether it is enabled
 * or disabled.
 */
static int ca91cx42_lm_get(struct vme_lm_resource *lm,
	unsigned long long *lm_base, u32 *aspace, u32 *cycle)
{
	u32 lm_ctl, enabled = 0;
	struct ca91cx42_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	*lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
	lm_ctl = ioread32(bridge->base + LM_CTL);

	if (lm_ctl & CA91CX42_LM_CTL_EN)
		enabled = 1;

	*aspace = 0;
	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A16)
		*aspace = VME_A16;
	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A24)
		*aspace = VME_A24;
	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A32)
		*aspace = VME_A32;

	*cycle = 0;
	if (lm_ctl & CA91CX42_LM_CTL_SUPR)
		*cycle |= VME_SUPER;
	if (lm_ctl & CA91CX42_LM_CTL_NPRIV)
		*cycle |= VME_USER;
	if (lm_ctl & CA91CX42_LM_CTL_PGM)
		*cycle |= VME_PROG;
	if (lm_ctl & CA91CX42_LM_CTL_DATA)
		*cycle |= VME_DATA;

	mutex_unlock(&lm->mtx);

	return enabled;
}

/*
 * Attach a callback to a specific location monitor.
 *
 * Callback will be passed the monitor triggered.
 */
static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
	void (*callback)(int))
{
	u32 lm_ctl, tmp;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = lm->parent->driver_priv;
	dev = lm->parent->parent;

	mutex_lock(&lm->mtx);

	/* Ensure that the location monitor is configured - need PGM or DATA */
	lm_ctl = ioread32(bridge->base + LM_CTL);
	if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
		mutex_unlock(&lm->mtx);
		dev_err(dev, "Location monitor not properly configured\n");
		return -EINVAL;
	}

	/* Check that a callback isn't already attached */
	if (bridge->lm_callback[monitor] != NULL) {
		mutex_unlock(&lm->mtx);
		dev_err(dev, "Existing callback attached\n");
		return -EBUSY;
	}

	/* Attach callback */
	bridge->lm_callback[monitor] = callback;

	/* Enable Location Monitor interrupt */
	tmp = ioread32(bridge->base + LINT_EN);
	tmp |= CA91CX42_LINT_LM[monitor];
	iowrite32(tmp, bridge->base + LINT_EN);

	/* Ensure that the global Location Monitor Enable is set */
	if ((lm_ctl & CA91CX42_LM_CTL_EN) == 0) {
		lm_ctl |= CA91CX42_LM_CTL_EN;
		iowrite32(lm_ctl, bridge->base + LM_CTL);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}

/*
 * Detach a callback function from a specific location monitor.
 */
static int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	/* Disable Location Monitor and ensure previous interrupts are clear */
	tmp = ioread32(bridge->base + LINT_EN);
	tmp &= ~CA91CX42_LINT_LM[monitor];
	iowrite32(tmp, bridge->base + LINT_EN);

	iowrite32(CA91CX42_LINT_LM[monitor],
		 bridge->base + LINT_STAT);

	/* Detach callback */
	bridge->lm_callback[monitor] = NULL;

	/* If all location monitors disabled, disable global Location Monitor */
	if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3)) == 0) {
		tmp = ioread32(bridge->base + LM_CTL);
		tmp &= ~CA91CX42_LM_CTL_EN;
		iowrite32(tmp, bridge->base + LM_CTL);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}

static int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
{
	u32 slot = 0;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	if (!geoid) {
		slot = ioread32(bridge->base + VCSR_BS);
		slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
	} else {
		slot = geoid;
	}

	return (int)slot;
}

static void *ca91cx42_alloc_consistent(struct device *parent, size_t size,
	dma_addr_t *dma)
{
	struct pci_dev *pdev;

	/* Find pci_dev container of dev */
	pdev = to_pci_dev(parent);

	return pci_alloc_consistent(pdev, size, dma);
}

static void ca91cx42_free_consistent(struct device *parent, size_t size,
	void *vaddr, dma_addr_t dma)
{
	struct pci_dev *pdev;

	/* Find pci_dev container of dev */
	pdev = to_pci_dev(parent);

	pci_free_consistent(pdev, size, vaddr, dma);
}

/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
 */
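/*
 * Each slot owns a 512 KB block of CR/CSR space, so slot 3, for example,
 * answers at CR/CSR offset 3 * 0x80000 = 0x180000; VCSR_TO is programmed
 * below so that this block lands on the coherent buffer allocated here.
 */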
static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	unsigned int crcsr_addr;
	int tmp, slot;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	slot = ca91cx42_slot_get(ca91cx42_bridge);

	/* Write CSR Base Address if slot ID is supplied as a module param */
	if (geoid)
		iowrite32(geoid << 27, bridge->base + VCSR_BS);

	dev_info(&pdev->dev, "CR/CSR slot ID: %d\n", slot);
	if (slot == 0) {
		dev_err(&pdev->dev,
			"Slot number is unset, not configuring CR/CSR space\n");
		return -EINVAL;
	}

	/* Allocate mem for CR/CSR image */
	bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
						     &bridge->crcsr_bus);
	if (bridge->crcsr_kernel == NULL) {
		dev_err(&pdev->dev,
			"Failed to allocate memory for CR/CSR image\n");
		return -ENOMEM;
	}

	crcsr_addr = slot * (512 * 1024);
	iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);

	tmp = ioread32(bridge->base + VCSR_CTL);
	tmp |= CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, bridge->base + VCSR_CTL);

	return 0;
}

static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Turn off CR/CSR space */
	tmp = ioread32(bridge->base + VCSR_CTL);
	tmp &= ~CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, bridge->base + VCSR_CTL);

	/* Free image */
	iowrite32(0, bridge->base + VCSR_TO);

	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
		bridge->crcsr_bus);
}

static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval, i;
	u32 data;
	struct list_head *pos = NULL, *n;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *ca91cx42_device;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;

	/* We want to support more than one of each bridge so we need to
	 * dynamically allocate the bridge structure
	 */
	ca91cx42_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);

	if (ca91cx42_bridge == NULL) {
		dev_err(&pdev->dev,
			"Failed to allocate memory for device structure\n");
		retval = -ENOMEM;
		goto err_struct;
	}

	ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);

	if (ca91cx42_device == NULL) {
		dev_err(&pdev->dev,
			"Failed to allocate memory for device structure\n");
		retval = -ENOMEM;
		goto err_driver;
	}

	ca91cx42_bridge->driver_priv = ca91cx42_device;

	/* Enable the device */
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto err_enable;
	}

	/* Map Registers */
	retval = pci_request_regions(pdev, driver_name);
	if (retval) {
		dev_err(&pdev->dev, "Unable to reserve resources\n");
		goto err_resource;
	}

	/* map registers in BAR 0 */
	ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
		4096);
	if (!ca91cx42_device->base) {
		dev_err(&pdev->dev, "Unable to remap CRG region\n");
		retval = -EIO;
		goto err_remap;
	}

	/* Check to see if the mapping worked out */
	data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
	if (data != PCI_VENDOR_ID_TUNDRA) {
		dev_err(&pdev->dev, "PCI_ID check failed\n");
		retval = -EIO;
		goto err_test;
	}

	/* Initialize wait queues & mutual exclusion flags */
	init_waitqueue_head(&ca91cx42_device->dma_queue);
	init_waitqueue_head(&ca91cx42_device->iack_queue);
	/* The mailbox handler wakes mbox_queue, so initialise it as well */
	init_waitqueue_head(&ca91cx42_device->mbox_queue);
	mutex_init(&ca91cx42_device->vme_int);
	mutex_init(&ca91cx42_device->vme_rmw);

	ca91cx42_bridge->parent = &pdev->dev;
	strcpy(ca91cx42_bridge->name, driver_name);

	/* Setup IRQ */
	retval = ca91cx42_irq_init(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Initialization failed.\n");
		goto err_irq;
	}

	/* Add master windows to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->master_resources);
	for (i = 0; i < CA91C142_MAX_MASTER; i++) {
		master_image = kmalloc(sizeof(struct vme_master_resource),
			GFP_KERNEL);
		if (master_image == NULL) {
			dev_err(&pdev->dev,
				"Failed to allocate memory for master resource structure\n");
			retval = -ENOMEM;
			goto err_master;
		}
		master_image->parent = ca91cx42_bridge;
		spin_lock_init(&master_image->lock);
		master_image->locked = 0;
		master_image->number = i;
		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_CRCSR | VME_USER1 | VME_USER2;
		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
		memset(&master_image->bus_resource, 0,
			sizeof(struct resource));
		master_image->kern_base = NULL;
		list_add_tail(&master_image->list,
			&ca91cx42_bridge->master_resources);
	}

	/* Add slave windows to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->slave_resources);
	for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
		slave_image = kmalloc(sizeof(struct vme_slave_resource),
			GFP_KERNEL);
		if (slave_image == NULL) {
			dev_err(&pdev->dev,
				"Failed to allocate memory for slave resource structure\n");
			retval = -ENOMEM;
			goto err_slave;
		}
		slave_image->parent = ca91cx42_bridge;
		mutex_init(&slave_image->mtx);
		slave_image->locked = 0;
		slave_image->number = i;
		slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
			VME_USER2;

		/* Only windows 0 and 4 support A16 */
		if (i == 0 || i == 4)
			slave_image->address_attr |= VME_A16;

		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		list_add_tail(&slave_image->list,
			&ca91cx42_bridge->slave_resources);
	}

	/* Add dma engines to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->dma_resources);
	for (i = 0; i < CA91C142_MAX_DMA; i++) {
		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
			GFP_KERNEL);
		if (dma_ctrlr == NULL) {
			dev_err(&pdev->dev,
				"Failed to allocate memory for dma resource structure\n");
			retval = -ENOMEM;
			goto err_dma;
		}
		dma_ctrlr->parent = ca91cx42_bridge;
		mutex_init(&dma_ctrlr->mtx);
		dma_ctrlr->locked = 0;
		dma_ctrlr->number = i;
		dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
			VME_DMA_MEM_TO_VME;
		INIT_LIST_HEAD(&dma_ctrlr->pending);
		INIT_LIST_HEAD(&dma_ctrlr->running);
		list_add_tail(&dma_ctrlr->list,
			&ca91cx42_bridge->dma_resources);
	}

	/* Add location monitor to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->lm_resources);
	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
	if (lm == NULL) {
		dev_err(&pdev->dev,
			"Failed to allocate memory for location monitor resource structure\n");
		retval = -ENOMEM;
		goto err_lm;
	}
	lm->parent = ca91cx42_bridge;
	mutex_init(&lm->mtx);
	lm->locked = 0;
	lm->number = 1;
	lm->monitors = 4;
	list_add_tail(&lm->list, &ca91cx42_bridge->lm_resources);

	ca91cx42_bridge->slave_get = ca91cx42_slave_get;
	ca91cx42_bridge->slave_set = ca91cx42_slave_set;
	ca91cx42_bridge->master_get = ca91cx42_master_get;
	ca91cx42_bridge->master_set = ca91cx42_master_set;
	ca91cx42_bridge->master_read = ca91cx42_master_read;
	ca91cx42_bridge->master_write = ca91cx42_master_write;
	ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
	ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
	ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
	ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
	ca91cx42_bridge->irq_set = ca91cx42_irq_set;
	ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
	ca91cx42_bridge->lm_set = ca91cx42_lm_set;
	ca91cx42_bridge->lm_get = ca91cx42_lm_get;
	ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
	ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
	ca91cx42_bridge->slot_get = ca91cx42_slot_get;
	ca91cx42_bridge->alloc_consistent = ca91cx42_alloc_consistent;
	ca91cx42_bridge->free_consistent = ca91cx42_free_consistent;

	data = ioread32(ca91cx42_device->base + MISC_CTL);
	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
		(data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
	dev_info(&pdev->dev, "Slot ID is %d\n",
		ca91cx42_slot_get(ca91cx42_bridge));

	if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev))
		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");

	/* Register with the VME core; the bridge pointer is also stashed as
	 * PCI driver data for retrieval in ca91cx42_remove()
	 */
	retval = vme_register_bridge(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Registration failed.\n");
		goto err_reg;
	}

	pci_set_drvdata(pdev, ca91cx42_bridge);

	return 0;

err_reg:
	ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
err_lm:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}
err_dma:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}
err_slave:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}
err_master:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(ca91cx42_bridge, pdev);
err_irq:
err_test:
	iounmap(ca91cx42_device->base);
err_remap:
	pci_release_regions(pdev);
err_resource:
	pci_disable_device(pdev);
err_enable:
	kfree(ca91cx42_device);
err_driver:
	kfree(ca91cx42_bridge);
err_struct:
	return retval;
}

static void ca91cx42_remove(struct pci_dev *pdev)
{
	struct list_head *pos = NULL, *n;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;
	struct ca91cx42_driver *bridge;
	struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);

	bridge = ca91cx42_bridge->driver_priv;

	/* Turn off Ints */
	iowrite32(0, bridge->base + LINT_EN);

	/* Turn off the windows */
	iowrite32(0x00800000, bridge->base + LSI0_CTL);
	iowrite32(0x00800000, bridge->base + LSI1_CTL);
	iowrite32(0x00800000, bridge->base + LSI2_CTL);
	iowrite32(0x00800000, bridge->base + LSI3_CTL);
	iowrite32(0x00800000, bridge->base + LSI4_CTL);
	iowrite32(0x00800000, bridge->base + LSI5_CTL);
	iowrite32(0x00800000, bridge->base + LSI6_CTL);
	iowrite32(0x00800000, bridge->base + LSI7_CTL);
	iowrite32(0x00F00000, bridge->base + VSI0_CTL);
	iowrite32(0x00F00000, bridge->base + VSI1_CTL);
	iowrite32(0x00F00000, bridge->base + VSI2_CTL);
	iowrite32(0x00F00000, bridge->base + VSI3_CTL);
	iowrite32(0x00F00000, bridge->base + VSI4_CTL);
	iowrite32(0x00F00000, bridge->base + VSI5_CTL);
	iowrite32(0x00F00000, bridge->base + VSI6_CTL);
	iowrite32(0x00F00000, bridge->base + VSI7_CTL);

	vme_unregister_bridge(ca91cx42_bridge);

	ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);

	/* resources are stored in link list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(ca91cx42_bridge, pdev);

	iounmap(bridge->base);

	pci_release_regions(pdev);

	pci_disable_device(pdev);

	kfree(bridge);
	kfree(ca91cx42_bridge);
}

module_pci_driver(ca91cx42_driver);

module_param(geoid, int, 0);
MODULE_PARM_DESC(geoid, "Override geographical addressing");

MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
MODULE_LICENSE("GPL");