1/*
2 * Support for the Tundra TSI148 VME-PCI Bridge Chip
3 *
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * This program is free software; you can redistribute  it and/or modify it
11 * under  the terms of  the GNU General  Public License as published by the
12 * Free Software Foundation;  either version 2 of the  License, or (at your
13 * option) any later version.
14 */
15
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/mm.h>
19#include <linux/types.h>
20#include <linux/errno.h>
21#include <linux/proc_fs.h>
22#include <linux/pci.h>
23#include <linux/poll.h>
24#include <linux/dma-mapping.h>
25#include <linux/interrupt.h>
26#include <linux/spinlock.h>
27#include <linux/sched.h>
28#include <linux/slab.h>
29#include <linux/time.h>
30#include <linux/io.h>
31#include <linux/uaccess.h>
32#include <linux/byteorder/generic.h>
33#include <linux/vme.h>
34
35#include "../vme_bridge.h"
36#include "vme_tsi148.h"
37
38static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
39static void tsi148_remove(struct pci_dev *);
40
41
42/* Module parameter */
43static bool err_chk;
44static int geoid;
45
46static const char driver_name[] = "vme_tsi148";
47
48static const struct pci_device_id tsi148_ids[] = {
49	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
50	{ },
51};
52
53static struct pci_driver tsi148_driver = {
54	.name = driver_name,
55	.id_table = tsi148_ids,
56	.probe = tsi148_probe,
57	.remove = tsi148_remove,
58};
59
60static void reg_join(unsigned int high, unsigned int low,
61	unsigned long long *variable)
62{
63	*variable = (unsigned long long)high << 32;
64	*variable |= (unsigned long long)low;
65}
66
67static void reg_split(unsigned long long variable, unsigned int *high,
68	unsigned int *low)
69{
70	*low = (unsigned int)variable & 0xFFFFFFFF;
71	*high = (unsigned int)(variable >> 32);
72}
73
74/*
75 * Wakes up DMA queue.
76 */
77static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
78	int channel_mask)
79{
80	u32 serviced = 0;
81
82	if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
83		wake_up(&bridge->dma_queue[0]);
84		serviced |= TSI148_LCSR_INTC_DMA0C;
85	}
86	if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
87		wake_up(&bridge->dma_queue[1]);
88		serviced |= TSI148_LCSR_INTC_DMA1C;
89	}
90
91	return serviced;
92}
93
94/*
95 * Wake up location monitor queue
96 */
97static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
98{
99	int i;
100	u32 serviced = 0;
101
102	for (i = 0; i < 4; i++) {
103		if (stat & TSI148_LCSR_INTS_LMS[i]) {
104			/* We only enable interrupts if the callback is set */
105			bridge->lm_callback[i](i);
106			serviced |= TSI148_LCSR_INTC_LMC[i];
107		}
108	}
109
110	return serviced;
111}
112
113/*
114 * Wake up mail box queue.
115 *
116 * XXX This functionality is not exposed up though API.
117 */
118static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
119{
120	int i;
121	u32 val;
122	u32 serviced = 0;
123	struct tsi148_driver *bridge;
124
125	bridge = tsi148_bridge->driver_priv;
126
127	for (i = 0; i < 4; i++) {
128		if (stat & TSI148_LCSR_INTS_MBS[i]) {
129			val = ioread32be(bridge->base +	TSI148_GCSR_MBOX[i]);
130			dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
131				": 0x%x\n", i, val);
132			serviced |= TSI148_LCSR_INTC_MBC[i];
133		}
134	}
135
136	return serviced;
137}
138
139/*
140 * Display error & status message when PERR (PCI) exception interrupt occurs.
141 */
142static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
143{
144	struct tsi148_driver *bridge;
145
146	bridge = tsi148_bridge->driver_priv;
147
148	dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
149		"attributes: %08x\n",
150		ioread32be(bridge->base + TSI148_LCSR_EDPAU),
151		ioread32be(bridge->base + TSI148_LCSR_EDPAL),
152		ioread32be(bridge->base + TSI148_LCSR_EDPAT));
153
154	dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
155		"completion reg: %08x\n",
156		ioread32be(bridge->base + TSI148_LCSR_EDPXA),
157		ioread32be(bridge->base + TSI148_LCSR_EDPXS));
158
159	iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
160
161	return TSI148_LCSR_INTC_PERRC;
162}
163
164/*
165 * Save address and status when VME error interrupt occurs.
166 */
167static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
168{
169	unsigned int error_addr_high, error_addr_low;
170	unsigned long long error_addr;
171	u32 error_attrib;
172	struct vme_bus_error *error = NULL;
173	struct tsi148_driver *bridge;
174
175	bridge = tsi148_bridge->driver_priv;
176
177	error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
178	error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
179	error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
180
181	reg_join(error_addr_high, error_addr_low, &error_addr);
182
183	/* Check for exception register overflow (we have lost error data) */
184	if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
185		dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
186			"Occurred\n");
187	}
188
189	if (err_chk) {
190		error = kmalloc(sizeof(struct vme_bus_error), GFP_ATOMIC);
191		if (error) {
192			error->address = error_addr;
193			error->attributes = error_attrib;
194			list_add_tail(&error->list, &tsi148_bridge->vme_errors);
195		} else {
196			dev_err(tsi148_bridge->parent,
197				"Unable to alloc memory for VMEbus Error reporting\n");
198		}
199	}
200
201	if (!error) {
202		dev_err(tsi148_bridge->parent,
203			"VME Bus Error at address: 0x%llx, attributes: %08x\n",
204			error_addr, error_attrib);
205	}
206
207	/* Clear Status */
208	iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
209
210	return TSI148_LCSR_INTC_VERRC;
211}
212
213/*
214 * Wake up IACK queue.
215 */
216static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
217{
218	wake_up(&bridge->iack_queue);
219
220	return TSI148_LCSR_INTC_IACKC;
221}
222
223/*
224 * Calling VME bus interrupt callback if provided.
225 */
226static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
227	u32 stat)
228{
229	int vec, i, serviced = 0;
230	struct tsi148_driver *bridge;
231
232	bridge = tsi148_bridge->driver_priv;
233
234	for (i = 7; i > 0; i--) {
235		if (stat & (1 << i)) {
236			/*
237			 * Note: Even though the registers are defined as
238			 * 32-bits in the spec, we only want to issue 8-bit
239			 * IACK cycles on the bus, read from offset 3.
240			 */
241			vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
242
243			vme_irq_handler(tsi148_bridge, i, vec);
244
245			serviced |= (1 << i);
246		}
247	}
248
249	return serviced;
250}
251
252/*
253 * Top level interrupt handler.  Clears appropriate interrupt status bits and
254 * then calls appropriate sub handler(s).
255 */
256static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
257{
258	u32 stat, enable, serviced = 0;
259	struct vme_bridge *tsi148_bridge;
260	struct tsi148_driver *bridge;
261
262	tsi148_bridge = ptr;
263
264	bridge = tsi148_bridge->driver_priv;
265
266	/* Determine which interrupts are unmasked and set */
267	enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
268	stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
269
270	/* Only look at unmasked interrupts */
271	stat &= enable;
272
273	if (unlikely(!stat))
274		return IRQ_NONE;
275
276	/* Call subhandlers as appropriate */
277	/* DMA irqs */
278	if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
279		serviced |= tsi148_DMA_irqhandler(bridge, stat);
280
281	/* Location monitor irqs */
282	if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
283			TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
284		serviced |= tsi148_LM_irqhandler(bridge, stat);
285
286	/* Mail box irqs */
287	if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
288			TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
289		serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
290
291	/* PCI bus error */
292	if (stat & TSI148_LCSR_INTS_PERRS)
293		serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
294
295	/* VME bus error */
296	if (stat & TSI148_LCSR_INTS_VERRS)
297		serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
298
299	/* IACK irq */
300	if (stat & TSI148_LCSR_INTS_IACKS)
301		serviced |= tsi148_IACK_irqhandler(bridge);
302
303	/* VME bus irqs */
304	if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
305			TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
306			TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
307			TSI148_LCSR_INTS_IRQ1S))
308		serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
309
310	/* Clear serviced interrupts */
311	iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
312
313	return IRQ_HANDLED;
314}
315
316static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
317{
318	int result;
319	unsigned int tmp;
320	struct pci_dev *pdev;
321	struct tsi148_driver *bridge;
322
323	pdev = to_pci_dev(tsi148_bridge->parent);
324
325	bridge = tsi148_bridge->driver_priv;
326
327	/* Initialise list for VME bus errors */
328	INIT_LIST_HEAD(&tsi148_bridge->vme_errors);
329
330	mutex_init(&tsi148_bridge->irq_mtx);
331
332	result = request_irq(pdev->irq,
333			     tsi148_irqhandler,
334			     IRQF_SHARED,
335			     driver_name, tsi148_bridge);
336	if (result) {
337		dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
338			"vector %02X\n", pdev->irq);
339		return result;
340	}
341
342	/* Enable and unmask interrupts */
343	tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
344		TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
345		TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
346		TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
347		TSI148_LCSR_INTEO_IACKEO;
348
349	/* This leaves the following interrupts masked.
350	 * TSI148_LCSR_INTEO_VIEEO
351	 * TSI148_LCSR_INTEO_SYSFLEO
352	 * TSI148_LCSR_INTEO_ACFLEO
353	 */
354
355	/* Don't enable Location Monitor interrupts here - they will be
356	 * enabled when the location monitors are properly configured and
357	 * a callback has been attached.
358	 * TSI148_LCSR_INTEO_LM0EO
359	 * TSI148_LCSR_INTEO_LM1EO
360	 * TSI148_LCSR_INTEO_LM2EO
361	 * TSI148_LCSR_INTEO_LM3EO
362	 */
363
364	/* Don't enable VME interrupts until we add a handler, else the board
365	 * will respond to it and we don't want that unless it knows how to
366	 * properly deal with it.
367	 * TSI148_LCSR_INTEO_IRQ7EO
368	 * TSI148_LCSR_INTEO_IRQ6EO
369	 * TSI148_LCSR_INTEO_IRQ5EO
370	 * TSI148_LCSR_INTEO_IRQ4EO
371	 * TSI148_LCSR_INTEO_IRQ3EO
372	 * TSI148_LCSR_INTEO_IRQ2EO
373	 * TSI148_LCSR_INTEO_IRQ1EO
374	 */
375
376	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
377	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
378
379	return 0;
380}
381
382static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
383	struct pci_dev *pdev)
384{
385	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
386
387	/* Turn off interrupts */
388	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
389	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
390
391	/* Clear all interrupts */
392	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
393
394	/* Detach interrupt handler */
395	free_irq(pdev->irq, tsi148_bridge);
396}
397
398/*
399 * Check to see if an IACk has been received, return true (1) or false (0).
400 */
401static int tsi148_iack_received(struct tsi148_driver *bridge)
402{
403	u32 tmp;
404
405	tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
406
407	if (tmp & TSI148_LCSR_VICR_IRQS)
408		return 0;
409	else
410		return 1;
411}
412
413/*
414 * Configure VME interrupt
415 */
416static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
417	int state, int sync)
418{
419	struct pci_dev *pdev;
420	u32 tmp;
421	struct tsi148_driver *bridge;
422
423	bridge = tsi148_bridge->driver_priv;
424
425	/* We need to do the ordering differently for enabling and disabling */
426	if (state == 0) {
427		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
428		tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
429		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
430
431		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
432		tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
433		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
434
435		if (sync != 0) {
436			pdev = to_pci_dev(tsi148_bridge->parent);
437			synchronize_irq(pdev->irq);
438		}
439	} else {
440		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
441		tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
442		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
443
444		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
445		tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
446		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
447	}
448}
449
450/*
451 * Generate a VME bus interrupt at the requested level & vector. Wait for
452 * interrupt to be acked.
453 */
454static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
455	int statid)
456{
457	u32 tmp;
458	struct tsi148_driver *bridge;
459
460	bridge = tsi148_bridge->driver_priv;
461
462	mutex_lock(&bridge->vme_int);
463
464	/* Read VICR register */
465	tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
466
467	/* Set Status/ID */
468	tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
469		(statid & TSI148_LCSR_VICR_STID_M);
470	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
471
472	/* Assert VMEbus IRQ */
473	tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
474	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
475
476	/* XXX Consider implementing a timeout? */
477	wait_event_interruptible(bridge->iack_queue,
478		tsi148_iack_received(bridge));
479
480	mutex_unlock(&bridge->vme_int);
481
482	return 0;
483}
484
485/*
486 * Find the first error in this address range
487 */
488static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
489	u32 aspace, unsigned long long address, size_t count)
490{
491	struct list_head *err_pos;
492	struct vme_bus_error *vme_err, *valid = NULL;
493	unsigned long long bound;
494
495	bound = address + count;
496
497	/*
498	 * XXX We are currently not looking at the address space when parsing
499	 *     for errors. This is because parsing the Address Modifier Codes
500	 *     is going to be quite resource intensive to do properly. We
501	 *     should be OK just looking at the addresses and this is certainly
502	 *     much better than what we had before.
503	 */
504	err_pos = NULL;
505	/* Iterate through errors */
506	list_for_each(err_pos, &tsi148_bridge->vme_errors) {
507		vme_err = list_entry(err_pos, struct vme_bus_error, list);
508		if ((vme_err->address >= address) &&
509			(vme_err->address < bound)) {
510
511			valid = vme_err;
512			break;
513		}
514	}
515
516	return valid;
517}
518
519/*
520 * Clear errors in the provided address range.
521 */
522static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
523	u32 aspace, unsigned long long address, size_t count)
524{
525	struct list_head *err_pos, *temp;
526	struct vme_bus_error *vme_err;
527	unsigned long long bound;
528
529	bound = address + count;
530
531	/*
532	 * XXX We are currently not looking at the address space when parsing
533	 *     for errors. This is because parsing the Address Modifier Codes
534	 *     is going to be quite resource intensive to do properly. We
535	 *     should be OK just looking at the addresses and this is certainly
536	 *     much better than what we had before.
537	 */
538	err_pos = NULL;
539	/* Iterate through errors */
540	list_for_each_safe(err_pos, temp, &tsi148_bridge->vme_errors) {
541		vme_err = list_entry(err_pos, struct vme_bus_error, list);
542
543		if ((vme_err->address >= address) &&
544			(vme_err->address < bound)) {
545
546			list_del(err_pos);
547			kfree(vme_err);
548		}
549	}
550}
551
552/*
553 * Initialize a slave window with the requested attributes.
554 */
555static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
556	unsigned long long vme_base, unsigned long long size,
557	dma_addr_t pci_base, u32 aspace, u32 cycle)
558{
559	unsigned int i, addr = 0, granularity = 0;
560	unsigned int temp_ctl = 0;
561	unsigned int vme_base_low, vme_base_high;
562	unsigned int vme_bound_low, vme_bound_high;
563	unsigned int pci_offset_low, pci_offset_high;
564	unsigned long long vme_bound, pci_offset;
565	struct vme_bridge *tsi148_bridge;
566	struct tsi148_driver *bridge;
567
568	tsi148_bridge = image->parent;
569	bridge = tsi148_bridge->driver_priv;
570
571	i = image->number;
572
573	switch (aspace) {
574	case VME_A16:
575		granularity = 0x10;
576		addr |= TSI148_LCSR_ITAT_AS_A16;
577		break;
578	case VME_A24:
579		granularity = 0x1000;
580		addr |= TSI148_LCSR_ITAT_AS_A24;
581		break;
582	case VME_A32:
583		granularity = 0x10000;
584		addr |= TSI148_LCSR_ITAT_AS_A32;
585		break;
586	case VME_A64:
587		granularity = 0x10000;
588		addr |= TSI148_LCSR_ITAT_AS_A64;
589		break;
590	default:
591		dev_err(tsi148_bridge->parent, "Invalid address space\n");
592		return -EINVAL;
593		break;
594	}
595
596	/* Convert 64-bit variables to 2x 32-bit variables */
597	reg_split(vme_base, &vme_base_high, &vme_base_low);
598
599	/*
600	 * Bound address is a valid address for the window, adjust
601	 * accordingly
602	 */
603	vme_bound = vme_base + size - granularity;
604	reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
605	pci_offset = (unsigned long long)pci_base - vme_base;
606	reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
607
608	if (vme_base_low & (granularity - 1)) {
609		dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
610		return -EINVAL;
611	}
612	if (vme_bound_low & (granularity - 1)) {
613		dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
614		return -EINVAL;
615	}
616	if (pci_offset_low & (granularity - 1)) {
617		dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
618			"alignment\n");
619		return -EINVAL;
620	}
621
622	/*  Disable while we are mucking around */
623	temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
624		TSI148_LCSR_OFFSET_ITAT);
625	temp_ctl &= ~TSI148_LCSR_ITAT_EN;
626	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
627		TSI148_LCSR_OFFSET_ITAT);
628
629	/* Setup mapping */
630	iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
631		TSI148_LCSR_OFFSET_ITSAU);
632	iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
633		TSI148_LCSR_OFFSET_ITSAL);
634	iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
635		TSI148_LCSR_OFFSET_ITEAU);
636	iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
637		TSI148_LCSR_OFFSET_ITEAL);
638	iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
639		TSI148_LCSR_OFFSET_ITOFU);
640	iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
641		TSI148_LCSR_OFFSET_ITOFL);
642
643	/* Setup 2eSST speeds */
644	temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
645	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
646	case VME_2eSST160:
647		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
648		break;
649	case VME_2eSST267:
650		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
651		break;
652	case VME_2eSST320:
653		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
654		break;
655	}
656
657	/* Setup cycle types */
658	temp_ctl &= ~(0x1F << 7);
659	if (cycle & VME_BLT)
660		temp_ctl |= TSI148_LCSR_ITAT_BLT;
661	if (cycle & VME_MBLT)
662		temp_ctl |= TSI148_LCSR_ITAT_MBLT;
663	if (cycle & VME_2eVME)
664		temp_ctl |= TSI148_LCSR_ITAT_2eVME;
665	if (cycle & VME_2eSST)
666		temp_ctl |= TSI148_LCSR_ITAT_2eSST;
667	if (cycle & VME_2eSSTB)
668		temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
669
670	/* Setup address space */
671	temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
672	temp_ctl |= addr;
673
674	temp_ctl &= ~0xF;
675	if (cycle & VME_SUPER)
676		temp_ctl |= TSI148_LCSR_ITAT_SUPR ;
677	if (cycle & VME_USER)
678		temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
679	if (cycle & VME_PROG)
680		temp_ctl |= TSI148_LCSR_ITAT_PGM;
681	if (cycle & VME_DATA)
682		temp_ctl |= TSI148_LCSR_ITAT_DATA;
683
684	/* Write ctl reg without enable */
685	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
686		TSI148_LCSR_OFFSET_ITAT);
687
688	if (enabled)
689		temp_ctl |= TSI148_LCSR_ITAT_EN;
690
691	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
692		TSI148_LCSR_OFFSET_ITAT);
693
694	return 0;
695}
696
697/*
698 * Get slave window configuration.
699 */
700static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
701	unsigned long long *vme_base, unsigned long long *size,
702	dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
703{
704	unsigned int i, granularity = 0, ctl = 0;
705	unsigned int vme_base_low, vme_base_high;
706	unsigned int vme_bound_low, vme_bound_high;
707	unsigned int pci_offset_low, pci_offset_high;
708	unsigned long long vme_bound, pci_offset;
709	struct tsi148_driver *bridge;
710
711	bridge = image->parent->driver_priv;
712
713	i = image->number;
714
715	/* Read registers */
716	ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
717		TSI148_LCSR_OFFSET_ITAT);
718
719	vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
720		TSI148_LCSR_OFFSET_ITSAU);
721	vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
722		TSI148_LCSR_OFFSET_ITSAL);
723	vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
724		TSI148_LCSR_OFFSET_ITEAU);
725	vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
726		TSI148_LCSR_OFFSET_ITEAL);
727	pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
728		TSI148_LCSR_OFFSET_ITOFU);
729	pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
730		TSI148_LCSR_OFFSET_ITOFL);
731
732	/* Convert 64-bit variables to 2x 32-bit variables */
733	reg_join(vme_base_high, vme_base_low, vme_base);
734	reg_join(vme_bound_high, vme_bound_low, &vme_bound);
735	reg_join(pci_offset_high, pci_offset_low, &pci_offset);
736
737	*pci_base = (dma_addr_t)(*vme_base + pci_offset);
738
739	*enabled = 0;
740	*aspace = 0;
741	*cycle = 0;
742
743	if (ctl & TSI148_LCSR_ITAT_EN)
744		*enabled = 1;
745
746	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
747		granularity = 0x10;
748		*aspace |= VME_A16;
749	}
750	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
751		granularity = 0x1000;
752		*aspace |= VME_A24;
753	}
754	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
755		granularity = 0x10000;
756		*aspace |= VME_A32;
757	}
758	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
759		granularity = 0x10000;
760		*aspace |= VME_A64;
761	}
762
763	/* Need granularity before we set the size */
764	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
765
766
767	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
768		*cycle |= VME_2eSST160;
769	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
770		*cycle |= VME_2eSST267;
771	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
772		*cycle |= VME_2eSST320;
773
774	if (ctl & TSI148_LCSR_ITAT_BLT)
775		*cycle |= VME_BLT;
776	if (ctl & TSI148_LCSR_ITAT_MBLT)
777		*cycle |= VME_MBLT;
778	if (ctl & TSI148_LCSR_ITAT_2eVME)
779		*cycle |= VME_2eVME;
780	if (ctl & TSI148_LCSR_ITAT_2eSST)
781		*cycle |= VME_2eSST;
782	if (ctl & TSI148_LCSR_ITAT_2eSSTB)
783		*cycle |= VME_2eSSTB;
784
785	if (ctl & TSI148_LCSR_ITAT_SUPR)
786		*cycle |= VME_SUPER;
787	if (ctl & TSI148_LCSR_ITAT_NPRIV)
788		*cycle |= VME_USER;
789	if (ctl & TSI148_LCSR_ITAT_PGM)
790		*cycle |= VME_PROG;
791	if (ctl & TSI148_LCSR_ITAT_DATA)
792		*cycle |= VME_DATA;
793
794	return 0;
795}
796
797/*
798 * Allocate and map PCI Resource
799 */
800static int tsi148_alloc_resource(struct vme_master_resource *image,
801	unsigned long long size)
802{
803	unsigned long long existing_size;
804	int retval = 0;
805	struct pci_dev *pdev;
806	struct vme_bridge *tsi148_bridge;
807
808	tsi148_bridge = image->parent;
809
810	pdev = to_pci_dev(tsi148_bridge->parent);
811
812	existing_size = (unsigned long long)(image->bus_resource.end -
813		image->bus_resource.start);
814
815	/* If the existing size is OK, return */
816	if ((size != 0) && (existing_size == (size - 1)))
817		return 0;
818
819	if (existing_size != 0) {
820		iounmap(image->kern_base);
821		image->kern_base = NULL;
822		kfree(image->bus_resource.name);
823		release_resource(&image->bus_resource);
824		memset(&image->bus_resource, 0, sizeof(struct resource));
825	}
826
827	/* Exit here if size is zero */
828	if (size == 0)
829		return 0;
830
831	if (image->bus_resource.name == NULL) {
832		image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
833		if (image->bus_resource.name == NULL) {
834			dev_err(tsi148_bridge->parent, "Unable to allocate "
835				"memory for resource name\n");
836			retval = -ENOMEM;
837			goto err_name;
838		}
839	}
840
841	sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
842		image->number);
843
844	image->bus_resource.start = 0;
845	image->bus_resource.end = (unsigned long)size;
846	image->bus_resource.flags = IORESOURCE_MEM;
847
848	retval = pci_bus_alloc_resource(pdev->bus,
849		&image->bus_resource, size, size, PCIBIOS_MIN_MEM,
850		0, NULL, NULL);
851	if (retval) {
852		dev_err(tsi148_bridge->parent, "Failed to allocate mem "
853			"resource for window %d size 0x%lx start 0x%lx\n",
854			image->number, (unsigned long)size,
855			(unsigned long)image->bus_resource.start);
856		goto err_resource;
857	}
858
859	image->kern_base = ioremap_nocache(
860		image->bus_resource.start, size);
861	if (image->kern_base == NULL) {
862		dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
863		retval = -ENOMEM;
864		goto err_remap;
865	}
866
867	return 0;
868
869err_remap:
870	release_resource(&image->bus_resource);
871err_resource:
872	kfree(image->bus_resource.name);
873	memset(&image->bus_resource, 0, sizeof(struct resource));
874err_name:
875	return retval;
876}
877
878/*
879 * Free and unmap PCI Resource
880 */
881static void tsi148_free_resource(struct vme_master_resource *image)
882{
883	iounmap(image->kern_base);
884	image->kern_base = NULL;
885	release_resource(&image->bus_resource);
886	kfree(image->bus_resource.name);
887	memset(&image->bus_resource, 0, sizeof(struct resource));
888}
889
890/*
891 * Set the attributes of an outbound window.
892 */
893static int tsi148_master_set(struct vme_master_resource *image, int enabled,
894	unsigned long long vme_base, unsigned long long size, u32 aspace,
895	u32 cycle, u32 dwidth)
896{
897	int retval = 0;
898	unsigned int i;
899	unsigned int temp_ctl = 0;
900	unsigned int pci_base_low, pci_base_high;
901	unsigned int pci_bound_low, pci_bound_high;
902	unsigned int vme_offset_low, vme_offset_high;
903	unsigned long long pci_bound, vme_offset, pci_base;
904	struct vme_bridge *tsi148_bridge;
905	struct tsi148_driver *bridge;
906	struct pci_bus_region region;
907	struct pci_dev *pdev;
908
909	tsi148_bridge = image->parent;
910
911	bridge = tsi148_bridge->driver_priv;
912
913	pdev = to_pci_dev(tsi148_bridge->parent);
914
915	/* Verify input data */
916	if (vme_base & 0xFFFF) {
917		dev_err(tsi148_bridge->parent, "Invalid VME Window "
918			"alignment\n");
919		retval = -EINVAL;
920		goto err_window;
921	}
922
923	if ((size == 0) && (enabled != 0)) {
924		dev_err(tsi148_bridge->parent, "Size must be non-zero for "
925			"enabled windows\n");
926		retval = -EINVAL;
927		goto err_window;
928	}
929
930	spin_lock(&image->lock);
931
932	/* Let's allocate the resource here rather than further up the stack as
933	 * it avoids pushing loads of bus dependent stuff up the stack. If size
934	 * is zero, any existing resource will be freed.
935	 */
936	retval = tsi148_alloc_resource(image, size);
937	if (retval) {
938		spin_unlock(&image->lock);
939		dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
940			"resource\n");
941		goto err_res;
942	}
943
944	if (size == 0) {
945		pci_base = 0;
946		pci_bound = 0;
947		vme_offset = 0;
948	} else {
949		pcibios_resource_to_bus(pdev->bus, &region,
950					&image->bus_resource);
951		pci_base = region.start;
952
953		/*
954		 * Bound address is a valid address for the window, adjust
955		 * according to window granularity.
956		 */
957		pci_bound = pci_base + (size - 0x10000);
958		vme_offset = vme_base - pci_base;
959	}
960
961	/* Convert 64-bit variables to 2x 32-bit variables */
962	reg_split(pci_base, &pci_base_high, &pci_base_low);
963	reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
964	reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
965
966	if (pci_base_low & 0xFFFF) {
967		spin_unlock(&image->lock);
968		dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
969		retval = -EINVAL;
970		goto err_gran;
971	}
972	if (pci_bound_low & 0xFFFF) {
973		spin_unlock(&image->lock);
974		dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
975		retval = -EINVAL;
976		goto err_gran;
977	}
978	if (vme_offset_low & 0xFFFF) {
979		spin_unlock(&image->lock);
980		dev_err(tsi148_bridge->parent, "Invalid VME Offset "
981			"alignment\n");
982		retval = -EINVAL;
983		goto err_gran;
984	}
985
986	i = image->number;
987
988	/* Disable while we are mucking around */
989	temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
990		TSI148_LCSR_OFFSET_OTAT);
991	temp_ctl &= ~TSI148_LCSR_OTAT_EN;
992	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
993		TSI148_LCSR_OFFSET_OTAT);
994
995	/* Setup 2eSST speeds */
996	temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
997	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
998	case VME_2eSST160:
999		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
1000		break;
1001	case VME_2eSST267:
1002		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
1003		break;
1004	case VME_2eSST320:
1005		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
1006		break;
1007	}
1008
1009	/* Setup cycle types */
1010	if (cycle & VME_BLT) {
1011		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1012		temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
1013	}
1014	if (cycle & VME_MBLT) {
1015		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1016		temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
1017	}
1018	if (cycle & VME_2eVME) {
1019		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1020		temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
1021	}
1022	if (cycle & VME_2eSST) {
1023		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1024		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
1025	}
1026	if (cycle & VME_2eSSTB) {
1027		dev_warn(tsi148_bridge->parent, "Currently not setting "
1028			"Broadcast Select Registers\n");
1029		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1030		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
1031	}
1032
1033	/* Setup data width */
1034	temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
1035	switch (dwidth) {
1036	case VME_D16:
1037		temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
1038		break;
1039	case VME_D32:
1040		temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
1041		break;
1042	default:
1043		spin_unlock(&image->lock);
1044		dev_err(tsi148_bridge->parent, "Invalid data width\n");
1045		retval = -EINVAL;
1046		goto err_dwidth;
1047	}
1048
1049	/* Setup address space */
1050	temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
1051	switch (aspace) {
1052	case VME_A16:
1053		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
1054		break;
1055	case VME_A24:
1056		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
1057		break;
1058	case VME_A32:
1059		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
1060		break;
1061	case VME_A64:
1062		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
1063		break;
1064	case VME_CRCSR:
1065		temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
1066		break;
1067	case VME_USER1:
1068		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
1069		break;
1070	case VME_USER2:
1071		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
1072		break;
1073	case VME_USER3:
1074		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
1075		break;
1076	case VME_USER4:
1077		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
1078		break;
1079	default:
1080		spin_unlock(&image->lock);
1081		dev_err(tsi148_bridge->parent, "Invalid address space\n");
1082		retval = -EINVAL;
1083		goto err_aspace;
1084		break;
1085	}
1086
1087	temp_ctl &= ~(3<<4);
1088	if (cycle & VME_SUPER)
1089		temp_ctl |= TSI148_LCSR_OTAT_SUP;
1090	if (cycle & VME_PROG)
1091		temp_ctl |= TSI148_LCSR_OTAT_PGM;
1092
1093	/* Setup mapping */
1094	iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
1095		TSI148_LCSR_OFFSET_OTSAU);
1096	iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
1097		TSI148_LCSR_OFFSET_OTSAL);
1098	iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
1099		TSI148_LCSR_OFFSET_OTEAU);
1100	iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1101		TSI148_LCSR_OFFSET_OTEAL);
1102	iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1103		TSI148_LCSR_OFFSET_OTOFU);
1104	iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1105		TSI148_LCSR_OFFSET_OTOFL);
1106
1107	/* Write ctl reg without enable */
1108	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1109		TSI148_LCSR_OFFSET_OTAT);
1110
1111	if (enabled)
1112		temp_ctl |= TSI148_LCSR_OTAT_EN;
1113
1114	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1115		TSI148_LCSR_OFFSET_OTAT);
1116
1117	spin_unlock(&image->lock);
1118	return 0;
1119
1120err_aspace:
1121err_dwidth:
1122err_gran:
1123	tsi148_free_resource(image);
1124err_res:
1125err_window:
1126	return retval;
1127
1128}
1129
1130/*
1131 * Set the attributes of an outbound window.
1132 *
1133 * XXX Not parsing prefetch information.
1134 */
1135static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
1136	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1137	u32 *cycle, u32 *dwidth)
1138{
1139	unsigned int i, ctl;
1140	unsigned int pci_base_low, pci_base_high;
1141	unsigned int pci_bound_low, pci_bound_high;
1142	unsigned int vme_offset_low, vme_offset_high;
1143
1144	unsigned long long pci_base, pci_bound, vme_offset;
1145	struct tsi148_driver *bridge;
1146
1147	bridge = image->parent->driver_priv;
1148
1149	i = image->number;
1150
1151	ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1152		TSI148_LCSR_OFFSET_OTAT);
1153
1154	pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1155		TSI148_LCSR_OFFSET_OTSAU);
1156	pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1157		TSI148_LCSR_OFFSET_OTSAL);
1158	pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1159		TSI148_LCSR_OFFSET_OTEAU);
1160	pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1161		TSI148_LCSR_OFFSET_OTEAL);
1162	vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1163		TSI148_LCSR_OFFSET_OTOFU);
1164	vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1165		TSI148_LCSR_OFFSET_OTOFL);
1166
1167	/* Convert 64-bit variables to 2x 32-bit variables */
1168	reg_join(pci_base_high, pci_base_low, &pci_base);
1169	reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1170	reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1171
1172	*vme_base = pci_base + vme_offset;
1173	*size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1174
1175	*enabled = 0;
1176	*aspace = 0;
1177	*cycle = 0;
1178	*dwidth = 0;
1179
1180	if (ctl & TSI148_LCSR_OTAT_EN)
1181		*enabled = 1;
1182
1183	/* Setup address space */
1184	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1185		*aspace |= VME_A16;
1186	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1187		*aspace |= VME_A24;
1188	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1189		*aspace |= VME_A32;
1190	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1191		*aspace |= VME_A64;
1192	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1193		*aspace |= VME_CRCSR;
1194	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1195		*aspace |= VME_USER1;
1196	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1197		*aspace |= VME_USER2;
1198	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1199		*aspace |= VME_USER3;
1200	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1201		*aspace |= VME_USER4;
1202
1203	/* Setup 2eSST speeds */
1204	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1205		*cycle |= VME_2eSST160;
1206	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1207		*cycle |= VME_2eSST267;
1208	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1209		*cycle |= VME_2eSST320;
1210
1211	/* Setup cycle types */
1212	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
1213		*cycle |= VME_SCT;
1214	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
1215		*cycle |= VME_BLT;
1216	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
1217		*cycle |= VME_MBLT;
1218	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
1219		*cycle |= VME_2eVME;
1220	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
1221		*cycle |= VME_2eSST;
1222	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
1223		*cycle |= VME_2eSSTB;
1224
1225	if (ctl & TSI148_LCSR_OTAT_SUP)
1226		*cycle |= VME_SUPER;
1227	else
1228		*cycle |= VME_USER;
1229
1230	if (ctl & TSI148_LCSR_OTAT_PGM)
1231		*cycle |= VME_PROG;
1232	else
1233		*cycle |= VME_DATA;
1234
1235	/* Setup data width */
1236	if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1237		*dwidth = VME_D16;
1238	if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1239		*dwidth = VME_D32;
1240
1241	return 0;
1242}
1243
1244
1245static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
1246	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1247	u32 *cycle, u32 *dwidth)
1248{
1249	int retval;
1250
1251	spin_lock(&image->lock);
1252
1253	retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1254		cycle, dwidth);
1255
1256	spin_unlock(&image->lock);
1257
1258	return retval;
1259}
1260
1261static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1262	size_t count, loff_t offset)
1263{
1264	int retval, enabled;
1265	unsigned long long vme_base, size;
1266	u32 aspace, cycle, dwidth;
1267	struct vme_bus_error *vme_err = NULL;
1268	struct vme_bridge *tsi148_bridge;
1269	void __iomem *addr = image->kern_base + offset;
1270	unsigned int done = 0;
1271	unsigned int count32;
1272
1273	tsi148_bridge = image->parent;
1274
1275	spin_lock(&image->lock);
1276
1277	/* The following code handles VME address alignment. We cannot use
1278	 * memcpy_xxx here because it may cut data transfers in to 8-bit
1279	 * cycles when D16 or D32 cycles are required on the VME bus.
1280	 * On the other hand, the bridge itself assures that the maximum data
1281	 * cycle configured for the transfer is used and splits it
1282	 * automatically for non-aligned addresses, so we don't want the
1283	 * overhead of needlessly forcing small transfers for the entire cycle.
1284	 */
1285	if ((uintptr_t)addr & 0x1) {
1286		*(u8 *)buf = ioread8(addr);
1287		done += 1;
1288		if (done == count)
1289			goto out;
1290	}
1291	if ((uintptr_t)(addr + done) & 0x2) {
1292		if ((count - done) < 2) {
1293			*(u8 *)(buf + done) = ioread8(addr + done);
1294			done += 1;
1295			goto out;
1296		} else {
1297			*(u16 *)(buf + done) = ioread16(addr + done);
1298			done += 2;
1299		}
1300	}
1301
1302	count32 = (count - done) & ~0x3;
1303	while (done < count32) {
1304		*(u32 *)(buf + done) = ioread32(addr + done);
1305		done += 4;
1306	}
1307
1308	if ((count - done) & 0x2) {
1309		*(u16 *)(buf + done) = ioread16(addr + done);
1310		done += 2;
1311	}
1312	if ((count - done) & 0x1) {
1313		*(u8 *)(buf + done) = ioread8(addr + done);
1314		done += 1;
1315	}
1316
1317out:
1318	retval = count;
1319
1320	if (!err_chk)
1321		goto skip_chk;
1322
1323	__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1324		&dwidth);
1325
1326	vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1327		count);
1328	if (vme_err != NULL) {
1329		dev_err(image->parent->parent, "First VME read error detected "
1330			"an at address 0x%llx\n", vme_err->address);
1331		retval = vme_err->address - (vme_base + offset);
1332		/* Clear down save errors in this address range */
1333		tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1334			count);
1335	}
1336
1337skip_chk:
1338	spin_unlock(&image->lock);
1339
1340	return retval;
1341}
1342
1343
1344static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1345	size_t count, loff_t offset)
1346{
1347	int retval = 0, enabled;
1348	unsigned long long vme_base, size;
1349	u32 aspace, cycle, dwidth;
1350	void __iomem *addr = image->kern_base + offset;
1351	unsigned int done = 0;
1352	unsigned int count32;
1353
1354	struct vme_bus_error *vme_err = NULL;
1355	struct vme_bridge *tsi148_bridge;
1356	struct tsi148_driver *bridge;
1357
1358	tsi148_bridge = image->parent;
1359
1360	bridge = tsi148_bridge->driver_priv;
1361
1362	spin_lock(&image->lock);
1363
1364	/* Here we apply for the same strategy we do in master_read
1365	 * function in order to assure the correct cycles.
1366	 */
1367	if ((uintptr_t)addr & 0x1) {
1368		iowrite8(*(u8 *)buf, addr);
1369		done += 1;
1370		if (done == count)
1371			goto out;
1372	}
1373	if ((uintptr_t)(addr + done) & 0x2) {
1374		if ((count - done) < 2) {
1375			iowrite8(*(u8 *)(buf + done), addr + done);
1376			done += 1;
1377			goto out;
1378		} else {
1379			iowrite16(*(u16 *)(buf + done), addr + done);
1380			done += 2;
1381		}
1382	}
1383
1384	count32 = (count - done) & ~0x3;
1385	while (done < count32) {
1386		iowrite32(*(u32 *)(buf + done), addr + done);
1387		done += 4;
1388	}
1389
1390	if ((count - done) & 0x2) {
1391		iowrite16(*(u16 *)(buf + done), addr + done);
1392		done += 2;
1393	}
1394	if ((count - done) & 0x1) {
1395		iowrite8(*(u8 *)(buf + done), addr + done);
1396		done += 1;
1397	}
1398
1399out:
1400	retval = count;
1401
1402	/*
1403	 * Writes are posted. We need to do a read on the VME bus to flush out
1404	 * all of the writes before we check for errors. We can't guarantee
1405	 * that reading the data we have just written is safe. It is believed
1406	 * that there isn't any read, write re-ordering, so we can read any
1407	 * location in VME space, so lets read the Device ID from the tsi148's
1408	 * own registers as mapped into CR/CSR space.
1409	 *
1410	 * We check for saved errors in the written address range/space.
1411	 */
1412
1413	if (!err_chk)
1414		goto skip_chk;
1415
1416	/*
1417	 * Get window info first, to maximise the time that the buffers may
1418	 * fluch on their own
1419	 */
1420	__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1421		&dwidth);
1422
1423	ioread16(bridge->flush_image->kern_base + 0x7F000);
1424
1425	vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1426		count);
1427	if (vme_err != NULL) {
1428		dev_warn(tsi148_bridge->parent, "First VME write error detected"
1429			" an at address 0x%llx\n", vme_err->address);
1430		retval = vme_err->address - (vme_base + offset);
1431		/* Clear down save errors in this address range */
1432		tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1433			count);
1434	}
1435
1436skip_chk:
1437	spin_unlock(&image->lock);
1438
1439	return retval;
1440}
1441
1442/*
1443 * Perform an RMW cycle on the VME bus.
1444 *
1445 * Requires a previously configured master window, returns final value.
1446 */
1447static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1448	unsigned int mask, unsigned int compare, unsigned int swap,
1449	loff_t offset)
1450{
1451	unsigned long long pci_addr;
1452	unsigned int pci_addr_high, pci_addr_low;
1453	u32 tmp, result;
1454	int i;
1455	struct tsi148_driver *bridge;
1456
1457	bridge = image->parent->driver_priv;
1458
1459	/* Find the PCI address that maps to the desired VME address */
1460	i = image->number;
1461
1462	/* Locking as we can only do one of these at a time */
1463	mutex_lock(&bridge->vme_rmw);
1464
1465	/* Lock image */
1466	spin_lock(&image->lock);
1467
1468	pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1469		TSI148_LCSR_OFFSET_OTSAU);
1470	pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1471		TSI148_LCSR_OFFSET_OTSAL);
1472
1473	reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1474	reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1475
1476	/* Configure registers */
1477	iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1478	iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1479	iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1480	iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1481	iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1482
1483	/* Enable RMW */
1484	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1485	tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1486	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1487
1488	/* Kick process off with a read to the required address. */
1489	result = ioread32be(image->kern_base + offset);
1490
1491	/* Disable RMW */
1492	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1493	tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1494	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1495
1496	spin_unlock(&image->lock);
1497
1498	mutex_unlock(&bridge->vme_rmw);
1499
1500	return result;
1501}
1502
1503static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
1504	u32 aspace, u32 cycle, u32 dwidth)
1505{
1506	u32 val;
1507
1508	val = be32_to_cpu(*attr);
1509
1510	/* Setup 2eSST speeds */
1511	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1512	case VME_2eSST160:
1513		val |= TSI148_LCSR_DSAT_2eSSTM_160;
1514		break;
1515	case VME_2eSST267:
1516		val |= TSI148_LCSR_DSAT_2eSSTM_267;
1517		break;
1518	case VME_2eSST320:
1519		val |= TSI148_LCSR_DSAT_2eSSTM_320;
1520		break;
1521	}
1522
1523	/* Setup cycle types */
1524	if (cycle & VME_SCT)
1525		val |= TSI148_LCSR_DSAT_TM_SCT;
1526
1527	if (cycle & VME_BLT)
1528		val |= TSI148_LCSR_DSAT_TM_BLT;
1529
1530	if (cycle & VME_MBLT)
1531		val |= TSI148_LCSR_DSAT_TM_MBLT;
1532
1533	if (cycle & VME_2eVME)
1534		val |= TSI148_LCSR_DSAT_TM_2eVME;
1535
1536	if (cycle & VME_2eSST)
1537		val |= TSI148_LCSR_DSAT_TM_2eSST;
1538
1539	if (cycle & VME_2eSSTB) {
1540		dev_err(dev, "Currently not setting Broadcast Select "
1541			"Registers\n");
1542		val |= TSI148_LCSR_DSAT_TM_2eSSTB;
1543	}
1544
1545	/* Setup data width */
1546	switch (dwidth) {
1547	case VME_D16:
1548		val |= TSI148_LCSR_DSAT_DBW_16;
1549		break;
1550	case VME_D32:
1551		val |= TSI148_LCSR_DSAT_DBW_32;
1552		break;
1553	default:
1554		dev_err(dev, "Invalid data width\n");
1555		return -EINVAL;
1556	}
1557
1558	/* Setup address space */
1559	switch (aspace) {
1560	case VME_A16:
1561		val |= TSI148_LCSR_DSAT_AMODE_A16;
1562		break;
1563	case VME_A24:
1564		val |= TSI148_LCSR_DSAT_AMODE_A24;
1565		break;
1566	case VME_A32:
1567		val |= TSI148_LCSR_DSAT_AMODE_A32;
1568		break;
1569	case VME_A64:
1570		val |= TSI148_LCSR_DSAT_AMODE_A64;
1571		break;
1572	case VME_CRCSR:
1573		val |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1574		break;
1575	case VME_USER1:
1576		val |= TSI148_LCSR_DSAT_AMODE_USER1;
1577		break;
1578	case VME_USER2:
1579		val |= TSI148_LCSR_DSAT_AMODE_USER2;
1580		break;
1581	case VME_USER3:
1582		val |= TSI148_LCSR_DSAT_AMODE_USER3;
1583		break;
1584	case VME_USER4:
1585		val |= TSI148_LCSR_DSAT_AMODE_USER4;
1586		break;
1587	default:
1588		dev_err(dev, "Invalid address space\n");
1589		return -EINVAL;
1590		break;
1591	}
1592
1593	if (cycle & VME_SUPER)
1594		val |= TSI148_LCSR_DSAT_SUP;
1595	if (cycle & VME_PROG)
1596		val |= TSI148_LCSR_DSAT_PGM;
1597
1598	*attr = cpu_to_be32(val);
1599
1600	return 0;
1601}
1602
1603static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
1604	u32 aspace, u32 cycle, u32 dwidth)
1605{
1606	u32 val;
1607
1608	val = be32_to_cpu(*attr);
1609
1610	/* Setup 2eSST speeds */
1611	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1612	case VME_2eSST160:
1613		val |= TSI148_LCSR_DDAT_2eSSTM_160;
1614		break;
1615	case VME_2eSST267:
1616		val |= TSI148_LCSR_DDAT_2eSSTM_267;
1617		break;
1618	case VME_2eSST320:
1619		val |= TSI148_LCSR_DDAT_2eSSTM_320;
1620		break;
1621	}
1622
1623	/* Setup cycle types */
1624	if (cycle & VME_SCT)
1625		val |= TSI148_LCSR_DDAT_TM_SCT;
1626
1627	if (cycle & VME_BLT)
1628		val |= TSI148_LCSR_DDAT_TM_BLT;
1629
1630	if (cycle & VME_MBLT)
1631		val |= TSI148_LCSR_DDAT_TM_MBLT;
1632
1633	if (cycle & VME_2eVME)
1634		val |= TSI148_LCSR_DDAT_TM_2eVME;
1635
1636	if (cycle & VME_2eSST)
1637		val |= TSI148_LCSR_DDAT_TM_2eSST;
1638
1639	if (cycle & VME_2eSSTB) {
1640		dev_err(dev, "Currently not setting Broadcast Select "
1641			"Registers\n");
1642		val |= TSI148_LCSR_DDAT_TM_2eSSTB;
1643	}
1644
1645	/* Setup data width */
1646	switch (dwidth) {
1647	case VME_D16:
1648		val |= TSI148_LCSR_DDAT_DBW_16;
1649		break;
1650	case VME_D32:
1651		val |= TSI148_LCSR_DDAT_DBW_32;
1652		break;
1653	default:
1654		dev_err(dev, "Invalid data width\n");
1655		return -EINVAL;
1656	}
1657
1658	/* Setup address space */
1659	switch (aspace) {
1660	case VME_A16:
1661		val |= TSI148_LCSR_DDAT_AMODE_A16;
1662		break;
1663	case VME_A24:
1664		val |= TSI148_LCSR_DDAT_AMODE_A24;
1665		break;
1666	case VME_A32:
1667		val |= TSI148_LCSR_DDAT_AMODE_A32;
1668		break;
1669	case VME_A64:
1670		val |= TSI148_LCSR_DDAT_AMODE_A64;
1671		break;
1672	case VME_CRCSR:
1673		val |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1674		break;
1675	case VME_USER1:
1676		val |= TSI148_LCSR_DDAT_AMODE_USER1;
1677		break;
1678	case VME_USER2:
1679		val |= TSI148_LCSR_DDAT_AMODE_USER2;
1680		break;
1681	case VME_USER3:
1682		val |= TSI148_LCSR_DDAT_AMODE_USER3;
1683		break;
1684	case VME_USER4:
1685		val |= TSI148_LCSR_DDAT_AMODE_USER4;
1686		break;
1687	default:
1688		dev_err(dev, "Invalid address space\n");
1689		return -EINVAL;
1690		break;
1691	}
1692
1693	if (cycle & VME_SUPER)
1694		val |= TSI148_LCSR_DDAT_SUP;
1695	if (cycle & VME_PROG)
1696		val |= TSI148_LCSR_DDAT_PGM;
1697
1698	*attr = cpu_to_be32(val);
1699
1700	return 0;
1701}
1702
1703/*
1704 * Add a link list descriptor to the list
1705 *
1706 * Note: DMA engine expects the DMA descriptor to be big endian.
1707 */
1708static int tsi148_dma_list_add(struct vme_dma_list *list,
1709	struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1710{
1711	struct tsi148_dma_entry *entry, *prev;
1712	u32 address_high, address_low, val;
1713	struct vme_dma_pattern *pattern_attr;
1714	struct vme_dma_pci *pci_attr;
1715	struct vme_dma_vme *vme_attr;
1716	int retval = 0;
1717	struct vme_bridge *tsi148_bridge;
1718
1719	tsi148_bridge = list->parent->parent;
1720
1721	/* Descriptor must be aligned on 64-bit boundaries */
1722	entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
1723	if (entry == NULL) {
1724		dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
1725			"dma resource structure\n");
1726		retval = -ENOMEM;
1727		goto err_mem;
1728	}
1729
1730	/* Test descriptor alignment */
1731	if ((unsigned long)&entry->descriptor & 0x7) {
1732		dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
1733			"byte boundary as required: %p\n",
1734			&entry->descriptor);
1735		retval = -EINVAL;
1736		goto err_align;
1737	}
1738
1739	/* Given we are going to fill out the structure, we probably don't
1740	 * need to zero it, but better safe than sorry for now.
1741	 */
1742	memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
1743
1744	/* Fill out source part */
1745	switch (src->type) {
1746	case VME_DMA_PATTERN:
1747		pattern_attr = src->private;
1748
1749		entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);
1750
1751		val = TSI148_LCSR_DSAT_TYP_PAT;
1752
1753		/* Default behaviour is 32 bit pattern */
1754		if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
1755			val |= TSI148_LCSR_DSAT_PSZ;
1756
1757		/* It seems that the default behaviour is to increment */
1758		if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
1759			val |= TSI148_LCSR_DSAT_NIN;
1760		entry->descriptor.dsat = cpu_to_be32(val);
1761		break;
1762	case VME_DMA_PCI:
1763		pci_attr = src->private;
1764
1765		reg_split((unsigned long long)pci_attr->address, &address_high,
1766			&address_low);
1767		entry->descriptor.dsau = cpu_to_be32(address_high);
1768		entry->descriptor.dsal = cpu_to_be32(address_low);
1769		entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
1770		break;
1771	case VME_DMA_VME:
1772		vme_attr = src->private;
1773
1774		reg_split((unsigned long long)vme_attr->address, &address_high,
1775			&address_low);
1776		entry->descriptor.dsau = cpu_to_be32(address_high);
1777		entry->descriptor.dsal = cpu_to_be32(address_low);
1778		entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
1779
1780		retval = tsi148_dma_set_vme_src_attributes(
1781			tsi148_bridge->parent, &entry->descriptor.dsat,
1782			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1783		if (retval < 0)
1784			goto err_source;
1785		break;
1786	default:
1787		dev_err(tsi148_bridge->parent, "Invalid source type\n");
1788		retval = -EINVAL;
1789		goto err_source;
1790		break;
1791	}
1792
1793	/* Assume last link - this will be over-written by adding another */
1794	entry->descriptor.dnlau = cpu_to_be32(0);
1795	entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);
1796
1797	/* Fill out destination part */
1798	switch (dest->type) {
1799	case VME_DMA_PCI:
1800		pci_attr = dest->private;
1801
1802		reg_split((unsigned long long)pci_attr->address, &address_high,
1803			&address_low);
1804		entry->descriptor.ddau = cpu_to_be32(address_high);
1805		entry->descriptor.ddal = cpu_to_be32(address_low);
1806		entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
1807		break;
1808	case VME_DMA_VME:
1809		vme_attr = dest->private;
1810
1811		reg_split((unsigned long long)vme_attr->address, &address_high,
1812			&address_low);
1813		entry->descriptor.ddau = cpu_to_be32(address_high);
1814		entry->descriptor.ddal = cpu_to_be32(address_low);
1815		entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
1816
1817		retval = tsi148_dma_set_vme_dest_attributes(
1818			tsi148_bridge->parent, &entry->descriptor.ddat,
1819			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1820		if (retval < 0)
1821			goto err_dest;
1822		break;
1823	default:
1824		dev_err(tsi148_bridge->parent, "Invalid destination type\n");
1825		retval = -EINVAL;
1826		goto err_dest;
1827		break;
1828	}
1829
1830	/* Fill out count */
1831	entry->descriptor.dcnt = cpu_to_be32((u32)count);
1832
1833	/* Add to list */
1834	list_add_tail(&entry->list, &list->entries);
1835
1836	/* Fill out previous descriptors "Next Address" */
1837	if (entry->list.prev != &list->entries) {
1838		prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1839			list);
1840		/* We need the bus address for the pointer */
1841		entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1842			&entry->descriptor,
1843			sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1844
1845		reg_split((unsigned long long)entry->dma_handle, &address_high,
1846			&address_low);
1847		entry->descriptor.dnlau = cpu_to_be32(address_high);
1848		entry->descriptor.dnlal = cpu_to_be32(address_low);
1849
1850	}
1851
1852	return 0;
1853
1854err_dest:
1855err_source:
1856err_align:
1857		kfree(entry);
1858err_mem:
1859	return retval;
1860}
1861
1862/*
1863 * Check to see if the provided DMA channel is busy.
1864 */
1865static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1866{
1867	u32 tmp;
1868	struct tsi148_driver *bridge;
1869
1870	bridge = tsi148_bridge->driver_priv;
1871
1872	tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1873		TSI148_LCSR_OFFSET_DSTA);
1874
1875	if (tmp & TSI148_LCSR_DSTA_BSY)
1876		return 0;
1877	else
1878		return 1;
1879
1880}
1881
1882/*
1883 * Execute a previously generated link list
1884 *
1885 * XXX Need to provide control register configuration.
1886 */
1887static int tsi148_dma_list_exec(struct vme_dma_list *list)
1888{
1889	struct vme_dma_resource *ctrlr;
1890	int channel, retval = 0;
1891	struct tsi148_dma_entry *entry;
1892	u32 bus_addr_high, bus_addr_low;
1893	u32 val, dctlreg = 0;
1894	struct vme_bridge *tsi148_bridge;
1895	struct tsi148_driver *bridge;
1896
1897	ctrlr = list->parent;
1898
1899	tsi148_bridge = ctrlr->parent;
1900
1901	bridge = tsi148_bridge->driver_priv;
1902
1903	mutex_lock(&ctrlr->mtx);
1904
1905	channel = ctrlr->number;
1906
1907	if (!list_empty(&ctrlr->running)) {
1908		/*
1909		 * XXX We have an active DMA transfer and currently haven't
1910		 *     sorted out the mechanism for "pending" DMA transfers.
1911		 *     Return busy.
1912		 */
1913		/* Need to add to pending here */
1914		mutex_unlock(&ctrlr->mtx);
1915		return -EBUSY;
1916	} else {
1917		list_add(&list->list, &ctrlr->running);
1918	}
1919
1920	/* Get first bus address and write into registers */
1921	entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
1922		list);
1923
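	/*
	 * Map the first descriptor so that its bus address can be written
	 * into the DMA controller's next-link registers; the mapping is
	 * released again in tsi148_dma_list_empty().
	 */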
1924	entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1925		&entry->descriptor,
1926		sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1927
1928	mutex_unlock(&ctrlr->mtx);
1929
1930	reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);
1931
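	/*
	 * DNLAU/DNLAL hold the upper and lower halves of the bus address of
	 * the first descriptor in the chain.
	 */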
1932	iowrite32be(bus_addr_high, bridge->base +
1933		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1934	iowrite32be(bus_addr_low, bridge->base +
1935		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1936
1937	dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1938		TSI148_LCSR_OFFSET_DCTL);
1939
1940	/* Start the operation */
1941	iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
1942		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1943
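	/*
	 * Sleep until the DMA interrupt wakes this channel's queue and
	 * tsi148_dma_busy() reports the channel idle (it returns non-zero
	 * once the BSY bit has cleared).
	 */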
1944	wait_event_interruptible(bridge->dma_queue[channel],
1945		tsi148_dma_busy(ctrlr->parent, channel));
1946
1947	/*
1948	 * Read the status register; it remains valid until we kick off a new
1949	 * transfer.
1950	 */
1951	val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1952		TSI148_LCSR_OFFSET_DSTA);
1953
1954	if (val & TSI148_LCSR_DSTA_VBE) {
1955		dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
1956		retval = -EIO;
1957	}
1958
1959	/* Remove list from running list */
1960	mutex_lock(&ctrlr->mtx);
1961	list_del(&list->list);
1962	mutex_unlock(&ctrlr->mtx);
1963
1964	return retval;
1965}
1966
1967/*
1968 * Clean up a previously generated link list
1969 *
1970 * This is kept as a separate function; do not assume the chain cannot be reused.
1971 */
1972static int tsi148_dma_list_empty(struct vme_dma_list *list)
1973{
1974	struct list_head *pos, *temp;
1975	struct tsi148_dma_entry *entry;
1976
1977	struct vme_bridge *tsi148_bridge = list->parent->parent;
1978
1979	/* detach and free each entry */
1980	list_for_each_safe(pos, temp, &list->entries) {
1981		list_del(pos);
1982		entry = list_entry(pos, struct tsi148_dma_entry, list);
1983
1984		dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
1985			sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1986		kfree(entry);
1987	}
1988
1989	return 0;
1990}
1991
1992/*
1993 * All 4 location monitors reside at the same base - this is therefore a
1994 * system-wide configuration.
1995 *
1996 * This does not enable the LM monitor - that should be done when the first
1997 * callback is attached and disabled when the last callback is removed.
1998 */
1999static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
2000	u32 aspace, u32 cycle)
2001{
2002	u32 lm_base_high, lm_base_low, lm_ctl = 0;
2003	int i;
2004	struct vme_bridge *tsi148_bridge;
2005	struct tsi148_driver *bridge;
2006
2007	tsi148_bridge = lm->parent;
2008
2009	bridge = tsi148_bridge->driver_priv;
2010
2011	mutex_lock(&lm->mtx);
2012
2013	/* If we already have a callback attached, we can't move it! */
2014	for (i = 0; i < lm->monitors; i++) {
2015		if (bridge->lm_callback[i] != NULL) {
2016			mutex_unlock(&lm->mtx);
2017			dev_err(tsi148_bridge->parent,
2018				"Location monitor callback attached, can't reset\n");
2019			return -EBUSY;
2020		}
2021	}
2022
2023	switch (aspace) {
2024	case VME_A16:
2025		lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
2026		break;
2027	case VME_A24:
2028		lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
2029		break;
2030	case VME_A32:
2031		lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
2032		break;
2033	case VME_A64:
2034		lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
2035		break;
2036	default:
2037		mutex_unlock(&lm->mtx);
2038		dev_err(tsi148_bridge->parent, "Invalid address space\n");
2039		return -EINVAL;
2041	}
2042
2043	if (cycle & VME_SUPER)
2044		lm_ctl |= TSI148_LCSR_LMAT_SUPR;
2045	if (cycle & VME_USER)
2046		lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
2047	if (cycle & VME_PROG)
2048		lm_ctl |= TSI148_LCSR_LMAT_PGM;
2049	if (cycle & VME_DATA)
2050		lm_ctl |= TSI148_LCSR_LMAT_DATA;
2051
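	/*
	 * The location monitor base is a 64-bit VME address programmed as
	 * upper and lower halves into LMBAU and LMBAL; LMAT carries the
	 * address space and cycle attributes selected above.
	 */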
2052	reg_split(lm_base, &lm_base_high, &lm_base_low);
2053
2054	iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
2055	iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
2056	iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2057
2058	mutex_unlock(&lm->mtx);
2059
2060	return 0;
2061}
2062
2063/* Get the configuration of the location monitor and return whether it is
2064 * enabled or disabled.
2065 */
2066static int tsi148_lm_get(struct vme_lm_resource *lm,
2067	unsigned long long *lm_base, u32 *aspace, u32 *cycle)
2068{
2069	u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
2070	struct tsi148_driver *bridge;
2071
2072	bridge = lm->parent->driver_priv;
2073
2074	mutex_lock(&lm->mtx);
2075
2076	lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
2077	lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
2078	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2079
2080	reg_join(lm_base_high, lm_base_low, lm_base);
2081
2082	if (lm_ctl & TSI148_LCSR_LMAT_EN)
2083		enabled = 1;
2084
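	/*
	 * The address space field is an encoded value rather than a bitmask,
	 * so it is compared against each possible encoding in turn.
	 */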
2085	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
2086		*aspace |= VME_A16;
2087
2088	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
2089		*aspace |= VME_A24;
2090
2091	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
2092		*aspace |= VME_A32;
2093
2094	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
2095		*aspace |= VME_A64;
2096
2098	if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
2099		*cycle |= VME_SUPER;
2100	if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
2101		*cycle |= VME_USER;
2102	if (lm_ctl & TSI148_LCSR_LMAT_PGM)
2103		*cycle |= VME_PROG;
2104	if (lm_ctl & TSI148_LCSR_LMAT_DATA)
2105		*cycle |= VME_DATA;
2106
2107	mutex_unlock(&lm->mtx);
2108
2109	return enabled;
2110}
2111
2112/*
2113 * Attach a callback to a specific location monitor.
2114 *
2115 * The callback will be passed the number of the monitor that triggered.
2116 */
2117static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2118	void (*callback)(int))
2119{
2120	u32 lm_ctl, tmp;
2121	struct vme_bridge *tsi148_bridge;
2122	struct tsi148_driver *bridge;
2123
2124	tsi148_bridge = lm->parent;
2125
2126	bridge = tsi148_bridge->driver_priv;
2127
2128	mutex_lock(&lm->mtx);
2129
2130	/* Ensure that the location monitor is configured - need PGM or DATA */
2131	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2132	if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2133		mutex_unlock(&lm->mtx);
2134		dev_err(tsi148_bridge->parent,
2135			"Location monitor not properly configured\n");
2136		return -EINVAL;
2137	}
2138
2139	/* Check that a callback isn't already attached */
2140	if (bridge->lm_callback[monitor] != NULL) {
2141		mutex_unlock(&lm->mtx);
2142		dev_err(tsi148_bridge->parent, "Existing callback attached\n");
2143		return -EBUSY;
2144	}
2145
2146	/* Attach callback */
2147	bridge->lm_callback[monitor] = callback;
2148
2149	/* Enable Location Monitor interrupt */
2150	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2151	tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2152	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
2153
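	/* INTEO additionally routes the enabled interrupt out to PCI */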
2154	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2155	tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2156	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2157
2158	/* Ensure that the global Location Monitor Enable bit is set */
2159	if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2160		lm_ctl |= TSI148_LCSR_LMAT_EN;
2161		iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2162	}
2163
2164	mutex_unlock(&lm->mtx);
2165
2166	return 0;
2167}
2168
2169/*
2170 * Detach a callback function from a specific location monitor.
2171 */
2172static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2173{
2174	u32 lm_en, tmp;
2175	struct tsi148_driver *bridge;
2176
2177	bridge = lm->parent->driver_priv;
2178
2179	mutex_lock(&lm->mtx);
2180
2181	/* Disable Location Monitor and ensure previous interrupts are clear */
2182	lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2183	lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2184	iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
2185
2186	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2187	tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2188	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2189
2190	iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2191		 bridge->base + TSI148_LCSR_INTC);
2192
2193	/* Detach callback */
2194	bridge->lm_callback[monitor] = NULL;
2195
2196	/* If no location monitor is enabled, disable the global Location Monitor */
2197	if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2198			TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2199		tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2200		tmp &= ~TSI148_LCSR_LMAT_EN;
2201		iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
2202	}
2203
2204	mutex_unlock(&lm->mtx);
2205
2206	return 0;
2207}
2208
2209/*
2210 * Determine Geographical Addressing
2211 */
2212static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2213{
2214	u32 slot = 0;
2215	struct tsi148_driver *bridge;
2216
2217	bridge = tsi148_bridge->driver_priv;
2218
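	/*
	 * The slot is normally taken from the geographical address pins
	 * latched in the VSTAT register; the geoid module parameter, when
	 * set, overrides the value read from the hardware.
	 */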
2219	if (!geoid)
2220		slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT) &
2221			TSI148_LCSR_VSTAT_GA_M;
2222	else
2223		slot = geoid;
2224
2225	return (int)slot;
2226}
2227
2228static void *tsi148_alloc_consistent(struct device *parent, size_t size,
2229	dma_addr_t *dma)
2230{
2231	struct pci_dev *pdev;
2232
2233	/* Find pci_dev container of dev */
2234	pdev = to_pci_dev(parent);
2235
2236	return pci_alloc_consistent(pdev, size, dma);
2237}
2238
2239static void tsi148_free_consistent(struct device *parent, size_t size,
2240	void *vaddr, dma_addr_t dma)
2241{
2242	struct pci_dev *pdev;
2243
2244	/* Find pci_dev container of dev */
2245	pdev = to_pci_dev(parent);
2246
2247	pci_free_consistent(pdev, size, vaddr, dma);
2248}
2249
2250/*
2251 * Configure CR/CSR space
2252 *
2253 * Access to the CR/CSR can be configured at power-up. The location of the
2254 * CR/CSR registers in the CR/CSR address space is determined by the board's
2255 * Auto-ID or Geographic address. This function ensures that the window is
2256 * enabled at an offset consistent with the board's geographic address.
2257 *
2258 * Each board has a 512kB window, with the highest 4kB used for the board's
2259 * registers, which leaves a fixed-length 508kB window that must be mapped
2260 * onto PCI memory.
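 *
 * For example, a board with geographic address 3 responds to CR/CSR accesses
 * in the 512kB window starting at 3 * 512kB = 0x180000 in CR/CSR space.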
2261 */
2262static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2263	struct pci_dev *pdev)
2264{
2265	u32 cbar, crat, vstat;
2266	u32 crcsr_bus_high, crcsr_bus_low;
2267	int retval;
2268	struct tsi148_driver *bridge;
2269
2270	bridge = tsi148_bridge->driver_priv;
2271
2272	/* Allocate mem for CR/CSR image */
2273	bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2274						     &bridge->crcsr_bus);
2275	if (bridge->crcsr_kernel == NULL) {
2276		dev_err(tsi148_bridge->parent,
2277			"Failed to allocate memory for CR/CSR image\n");
2278		return -ENOMEM;
2279	}
2280
2281	reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2282
2283	iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2284	iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2285
2286	/* Ensure that the CR/CSR is configured at the correct offset */
2287	cbar = ioread32be(bridge->base + TSI148_CBAR);
2288	cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
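	/* CBAR holds the CR/CSR base (the slot number) shifted left by 3 bits */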
2289
2290	vstat = tsi148_slot_get(tsi148_bridge);
2291
2292	if (cbar != vstat) {
2293		cbar = vstat;
2294		dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
2295		iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
2296	}
2297	dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
2298
2299	crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2300	if (crat & TSI148_LCSR_CRAT_EN) {
2301		dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
2302	} else {
2303		dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
2304		iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2305			bridge->base + TSI148_LCSR_CRAT);
2306	}
2307
2308	/* If we want flushed, error-checked writes, set up a window
2309	 * over the CR/CSR registers. We read from here to safely flush
2310	 * through VME writes.
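	 * The window covers this board's own 512kB CR/CSR region (at
	 * vstat * 0x80000) and uses D16 single cycles.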
2311	 */
2312	if (err_chk) {
2313		retval = tsi148_master_set(bridge->flush_image, 1,
2314			(vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2315			VME_D16);
2316		if (retval)
2317			dev_err(tsi148_bridge->parent,
2318				"Configuring flush image failed\n");
2319	}
2320
2321	return 0;
2323}
2324
2325static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2326	struct pci_dev *pdev)
2327{
2328	u32 crat;
2329	struct tsi148_driver *bridge;
2330
2331	bridge = tsi148_bridge->driver_priv;
2332
2333	/* Turn off CR/CSR space */
2334	crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2335	iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2336		bridge->base + TSI148_LCSR_CRAT);
2337
2338	/* Free image */
2339	iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2340	iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2341
2342	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
2343		bridge->crcsr_bus);
2344}
2345
2346static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2347{
2348	int retval, i, master_num;
2349	u32 data;
2350	struct list_head *pos = NULL, *n;
2351	struct vme_bridge *tsi148_bridge;
2352	struct tsi148_driver *tsi148_device;
2353	struct vme_master_resource *master_image;
2354	struct vme_slave_resource *slave_image;
2355	struct vme_dma_resource *dma_ctrlr;
2356	struct vme_lm_resource *lm;
2357
2358	/* If we want to support more than one of each bridge, we need to
2359	 * dynamically generate this so we get one per device
2360	 */
2361	tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
2362	if (tsi148_bridge == NULL) {
2363		dev_err(&pdev->dev,
2364			"Failed to allocate memory for device structure\n");
2365		retval = -ENOMEM;
2366		goto err_struct;
2367	}
2368
2369	tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
2370	if (tsi148_device == NULL) {
2371		dev_err(&pdev->dev,
2372			"Failed to allocate memory for device structure\n");
2373		retval = -ENOMEM;
2374		goto err_driver;
2375	}
2376
2377	tsi148_bridge->driver_priv = tsi148_device;
2378
2379	/* Enable the device */
2380	retval = pci_enable_device(pdev);
2381	if (retval) {
2382		dev_err(&pdev->dev, "Unable to enable device\n");
2383		goto err_enable;
2384	}
2385
2386	/* Map Registers */
2387	retval = pci_request_regions(pdev, driver_name);
2388	if (retval) {
2389		dev_err(&pdev->dev, "Unable to reserve resources\n");
2390		goto err_resource;
2391	}
2392
2393	/* map registers in BAR 0 */
2394	tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
2395		4096);
2396	if (!tsi148_device->base) {
2397		dev_err(&pdev->dev, "Unable to remap CRG region\n");
2398		retval = -EIO;
2399		goto err_remap;
2400	}
2401
2402	/* Check to see if the mapping worked out */
2403	data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2404	if (data != PCI_VENDOR_ID_TUNDRA) {
2405		dev_err(&pdev->dev, "CRG region check failed\n");
2406		retval = -EIO;
2407		goto err_test;
2408	}
2409
2410	/* Initialize wait queues & mutual exclusion flags */
2411	init_waitqueue_head(&tsi148_device->dma_queue[0]);
2412	init_waitqueue_head(&tsi148_device->dma_queue[1]);
2413	init_waitqueue_head(&tsi148_device->iack_queue);
2414	mutex_init(&tsi148_device->vme_int);
2415	mutex_init(&tsi148_device->vme_rmw);
2416
2417	tsi148_bridge->parent = &pdev->dev;
2418	strcpy(tsi148_bridge->name, driver_name);
2419
2420	/* Setup IRQ */
2421	retval = tsi148_irq_init(tsi148_bridge);
2422	if (retval != 0) {
2423		dev_err(&pdev->dev, "Chip Initialization failed.\n");
2424		goto err_irq;
2425	}
2426
2427	/* If we are going to flush writes, we need to read from the VME bus.
2428	 * We need to do this safely, thus we read the device's own CR/CSR
2429	 * register. To do this we must set up a window in CR/CSR space and
2430	 * hence have one less master window resource available.
2431	 */
2432	master_num = TSI148_MAX_MASTER;
2433	if (err_chk) {
2434		master_num--;
2435
2436		tsi148_device->flush_image =
2437			kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
2438		if (tsi148_device->flush_image == NULL) {
2439			dev_err(&pdev->dev,
2440				"Failed to allocate memory for flush resource structure\n");
2441			retval = -ENOMEM;
2442			goto err_master;
2443		}
2444		tsi148_device->flush_image->parent = tsi148_bridge;
2445		spin_lock_init(&tsi148_device->flush_image->lock);
2446		tsi148_device->flush_image->locked = 1;
2447		tsi148_device->flush_image->number = master_num;
2448		memset(&tsi148_device->flush_image->bus_resource, 0,
2449			sizeof(struct resource));
2450		tsi148_device->flush_image->kern_base  = NULL;
2451	}
2452
2453	/* Add master windows to list */
2454	INIT_LIST_HEAD(&tsi148_bridge->master_resources);
2455	for (i = 0; i < master_num; i++) {
2456		master_image = kmalloc(sizeof(struct vme_master_resource),
2457			GFP_KERNEL);
2458		if (master_image == NULL) {
2459			dev_err(&pdev->dev,
2460				"Failed to allocate memory for master resource structure\n");
2461			retval = -ENOMEM;
2462			goto err_master;
2463		}
2464		master_image->parent = tsi148_bridge;
2465		spin_lock_init(&master_image->lock);
2466		master_image->locked = 0;
2467		master_image->number = i;
2468		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2469			VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2470			VME_USER3 | VME_USER4;
2471		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2472			VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2473			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2474			VME_PROG | VME_DATA;
2475		master_image->width_attr = VME_D16 | VME_D32;
2476		memset(&master_image->bus_resource, 0,
2477			sizeof(struct resource));
2478		master_image->kern_base  = NULL;
2479		list_add_tail(&master_image->list,
2480			&tsi148_bridge->master_resources);
2481	}
2482
2483	/* Add slave windows to list */
2484	INIT_LIST_HEAD(&tsi148_bridge->slave_resources);
2485	for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2486		slave_image = kmalloc(sizeof(struct vme_slave_resource),
2487			GFP_KERNEL);
2488		if (slave_image == NULL) {
2489			dev_err(&pdev->dev,
2490				"Failed to allocate memory for slave resource structure\n");
2491			retval = -ENOMEM;
2492			goto err_slave;
2493		}
2494		slave_image->parent = tsi148_bridge;
2495		mutex_init(&slave_image->mtx);
2496		slave_image->locked = 0;
2497		slave_image->number = i;
2498		slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2499			VME_A64;
2500		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2501			VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2502			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2503			VME_PROG | VME_DATA;
2504		list_add_tail(&slave_image->list,
2505			&tsi148_bridge->slave_resources);
2506	}
2507
2508	/* Add dma engines to list */
2509	INIT_LIST_HEAD(&tsi148_bridge->dma_resources);
2510	for (i = 0; i < TSI148_MAX_DMA; i++) {
2511		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
2512			GFP_KERNEL);
2513		if (dma_ctrlr == NULL) {
2514			dev_err(&pdev->dev,
2515				"Failed to allocate memory for dma resource structure\n");
2516			retval = -ENOMEM;
2517			goto err_dma;
2518		}
2519		dma_ctrlr->parent = tsi148_bridge;
2520		mutex_init(&dma_ctrlr->mtx);
2521		dma_ctrlr->locked = 0;
2522		dma_ctrlr->number = i;
2523		dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2524			VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2525			VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2526			VME_DMA_PATTERN_TO_MEM;
2527		INIT_LIST_HEAD(&dma_ctrlr->pending);
2528		INIT_LIST_HEAD(&dma_ctrlr->running);
2529		list_add_tail(&dma_ctrlr->list,
2530			&tsi148_bridge->dma_resources);
2531	}
2532
2533	/* Add location monitor to list */
2534	INIT_LIST_HEAD(&tsi148_bridge->lm_resources);
2535	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
2536	if (lm == NULL) {
2537		dev_err(&pdev->dev,
2538			"Failed to allocate memory for location monitor resource structure\n");
2539		retval = -ENOMEM;
2540		goto err_lm;
2541	}
2542	lm->parent = tsi148_bridge;
2543	mutex_init(&lm->mtx);
2544	lm->locked = 0;
2545	lm->number = 1;
2546	lm->monitors = 4;
2547	list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
2548
2549	tsi148_bridge->slave_get = tsi148_slave_get;
2550	tsi148_bridge->slave_set = tsi148_slave_set;
2551	tsi148_bridge->master_get = tsi148_master_get;
2552	tsi148_bridge->master_set = tsi148_master_set;
2553	tsi148_bridge->master_read = tsi148_master_read;
2554	tsi148_bridge->master_write = tsi148_master_write;
2555	tsi148_bridge->master_rmw = tsi148_master_rmw;
2556	tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2557	tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2558	tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2559	tsi148_bridge->irq_set = tsi148_irq_set;
2560	tsi148_bridge->irq_generate = tsi148_irq_generate;
2561	tsi148_bridge->lm_set = tsi148_lm_set;
2562	tsi148_bridge->lm_get = tsi148_lm_get;
2563	tsi148_bridge->lm_attach = tsi148_lm_attach;
2564	tsi148_bridge->lm_detach = tsi148_lm_detach;
2565	tsi148_bridge->slot_get = tsi148_slot_get;
2566	tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
2567	tsi148_bridge->free_consistent = tsi148_free_consistent;
2568
2569	data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2570	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2571		(data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
2572	if (!geoid)
2573		dev_info(&pdev->dev, "VME geographical address is %d\n",
2574			data & TSI148_LCSR_VSTAT_GA_M);
2575	else
2576		dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2577			geoid);
2578
2579	dev_info(&pdev->dev, "VME write flush and error checking is %s\n",
2580		err_chk ? "enabled" : "disabled");
2581
2582	retval = tsi148_crcsr_init(tsi148_bridge, pdev);
2583	if (retval) {
2584		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2585		goto err_crcsr;
2586	}
2587
2588	retval = vme_register_bridge(tsi148_bridge);
2589	if (retval != 0) {
2590		dev_err(&pdev->dev, "Chip Registration failed.\n");
2591		goto err_reg;
2592	}
2593
2594	pci_set_drvdata(pdev, tsi148_bridge);
2595
2596	/* Clear VME bus "board fail", and "power-up reset" lines */
2597	data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2598	data &= ~TSI148_LCSR_VSTAT_BRDFL;
2599	data |= TSI148_LCSR_VSTAT_CPURST;
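	/* CPURST is written as 1 to clear the latched power-up reset status */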
2600	iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
2601
2602	return 0;
2603
2604err_reg:
2605	tsi148_crcsr_exit(tsi148_bridge, pdev);
2606err_crcsr:
2607err_lm:
2608	/* resources are stored in a linked list */
2609	list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) {
2610		lm = list_entry(pos, struct vme_lm_resource, list);
2611		list_del(pos);
2612		kfree(lm);
2613	}
2614err_dma:
2615	/* resources are stored in a linked list */
2616	list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) {
2617		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2618		list_del(pos);
2619		kfree(dma_ctrlr);
2620	}
2621err_slave:
2622	/* resources are stored in a linked list */
2623	list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) {
2624		slave_image = list_entry(pos, struct vme_slave_resource, list);
2625		list_del(pos);
2626		kfree(slave_image);
2627	}
2628err_master:
2629	/* resources are stored in a linked list */
2630	list_for_each_safe(pos, n, &tsi148_bridge->master_resources) {
2631		master_image = list_entry(pos, struct vme_master_resource,
2632			list);
2633		list_del(pos);
2634		kfree(master_image);
2635	}
2636
2637	tsi148_irq_exit(tsi148_bridge, pdev);
2638err_irq:
2639err_test:
2640	iounmap(tsi148_device->base);
2641err_remap:
2642	pci_release_regions(pdev);
2643err_resource:
2644	pci_disable_device(pdev);
2645err_enable:
2646	kfree(tsi148_device);
2647err_driver:
2648	kfree(tsi148_bridge);
2649err_struct:
2650	return retval;
2652}
2653
2654static void tsi148_remove(struct pci_dev *pdev)
2655{
2656	struct list_head *pos = NULL;
2657	struct list_head *tmplist;
2658	struct vme_master_resource *master_image;
2659	struct vme_slave_resource *slave_image;
2660	struct vme_dma_resource *dma_ctrlr;
2661	int i;
2662	struct tsi148_driver *bridge;
2663	struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
2664
2665	bridge = tsi148_bridge->driver_priv;
2666
2668	dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2669
2670	/*
2671	 *  Shutdown all inbound and outbound windows.
2672	 */
2673	for (i = 0; i < 8; i++) {
2674		iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
2675			TSI148_LCSR_OFFSET_ITAT);
2676		iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
2677			TSI148_LCSR_OFFSET_OTAT);
2678	}
2679
2680	/*
2681	 *  Shutdown Location monitor.
2682	 */
2683	iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
2684
2685	/*
2686	 *  Shutdown CRG map.
2687	 */
2688	iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
2689
2690	/*
2691	 *  Clear error status.
2692	 */
2693	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2694	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2695	iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
2696
2697	/*
2698	 *  Remove VIRQ interrupt (if any)
2699	 */
2700	if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2701		iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
2702
2703	/*
2704	 *  Map all Interrupts to PCI INTA
2705	 */
2706	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2707	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
2708
2709	tsi148_irq_exit(tsi148_bridge, pdev);
2710
2711	vme_unregister_bridge(tsi148_bridge);
2712
2713	tsi148_crcsr_exit(tsi148_bridge, pdev);
2714
2715	/* resources are stored in a linked list */
2716	list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
2717		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2718		list_del(pos);
2719		kfree(dma_ctrlr);
2720	}
2721
2722	/* resources are stored in a linked list */
2723	list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
2724		slave_image = list_entry(pos, struct vme_slave_resource, list);
2725		list_del(pos);
2726		kfree(slave_image);
2727	}
2728
2729	/* resources are stored in a linked list */
2730	list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
2731		master_image = list_entry(pos, struct vme_master_resource,
2732			list);
2733		list_del(pos);
2734		kfree(master_image);
2735	}
2736
2737	iounmap(bridge->base);
2738
2739	pci_release_regions(pdev);
2740
2741	pci_disable_device(pdev);
2742
2743	kfree(tsi148_bridge->driver_priv);
2744
2745	kfree(tsi148_bridge);
2746}
2747
2748module_pci_driver(tsi148_driver);
2749
2750MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2751module_param(err_chk, bool, 0);
2752
2753MODULE_PARM_DESC(geoid, "Override geographical addressing");
2754module_param(geoid, int, 0);
2755
2756MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2757MODULE_LICENSE("GPL");
2758