/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/time.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"
#include "sputrace.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)

/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];       /* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;        /* format for read operation */
	struct mutex mutex;     /* protects access to these buffers */
};

static int spufs_attr_open(struct inode *inode, struct file *file,
		int (*get)(void *, u64 *), int (*set)(void *, u64),
		const char *fmt)
{
	struct spufs_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);
	file->private_data = attr;

	return nonseekable_open(inode, file);
}

static int spufs_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static const struct file_operations __fops = {				\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
	.llseek  = generic_file_llseek,					\
};
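
/*
 * Usage sketch for the macro above (illustrative only; the names
 * spufs_foo_ops/spufs_foo_get/spufs_foo_set are placeholders, real
 * invocations appear below via DEFINE_SPUFS_ATTRIBUTE):
 *
 *	DEFINE_SPUFS_SIMPLE_ATTRIBUTE(spufs_foo_ops, spufs_foo_get,
 *				      spufs_foo_set, "0x%llx\n");
 *
 * This expands to a spufs_foo_ops_open() helper plus a file_operations
 * struct wired to the generic spufs_attr_read()/spufs_attr_write()
 * handlers above.
 */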


static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);

	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos > LS_SIZE)
		return -EFBIG;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	local_store = ctx->ops->get_ls(ctx);
	size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
	spu_release(ctx);

	return size;
}

static int
spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct spu_context *ctx	= vma->vm_file->private_data;
	unsigned long address = (unsigned long)vmf->virtual_address;
	unsigned long pfn, offset;

#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_state *csa = &ctx->csa;
	int psize;

	/* Check what page size we are using */
	psize = get_slice_psize(vma->vm_mm, address);

	/* Some sanity checking */
	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

	/* Wow, 64K, cool, we need to align the address though */
	if (csa->use_big_pages) {
		BUG_ON(vma->vm_start & 0xffff);
		address &= ~0xfffful;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= LS_SIZE)
		return VM_FAULT_SIGBUS;

	pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
			address, offset);

	if (spu_acquire(ctx))
		return VM_FAULT_NOPAGE;

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return VM_FAULT_NOPAGE;
}

static int spufs_mem_mmap_access(struct vm_area_struct *vma,
				unsigned long address,
				void *buf, int len, int write)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;
	char *local_store;

	if (write && !(vma->vm_flags & VM_WRITE))
		return -EACCES;
	if (spu_acquire(ctx))
		return -EINTR;
	if (offset + len > LS_SIZE)
		len = LS_SIZE - offset;
	local_store = ctx->ops->get_ls(ctx);
	if (write)
		memcpy_toio(local_store + offset, buf, len);
	else
		memcpy_fromio(buf, local_store + offset, len);
	spu_release(ctx);
	return len;
}

static const struct vm_operations_struct spufs_mem_mmap_vmops = {
	.fault = spufs_mem_mmap_fault,
	.access = spufs_mem_mmap_access,
};

static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_context	*ctx = file->private_data;
	struct spu_state	*csa = &ctx->csa;

	/* Sanity check VMA alignment */
	if (csa->use_big_pages) {
		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
			 vma->vm_pgoff);
		if (vma->vm_start & 0xffff)
			return -EINVAL;
		if (vma->vm_pgoff & 0xf)
			return -EINVAL;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

#ifdef CONFIG_SPU_FS_64K_LS
static unsigned long spufs_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct spu_context	*ctx = file->private_data;
	struct spu_state	*csa = &ctx->csa;

	/* If not using big pages, fallback to normal MM g_u_a */
	if (!csa->use_big_pages)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Else, try to obtain a 64K pages slice */
	return slice_get_unmapped_area(addr, len, flags,
				       MMU_PAGE_64K, 1);
}
#endif /* CONFIG_SPU_FS_64K_LS */

static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
};
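
/*
 * User-space usage sketch for the "mem" file (illustrative; the mount
 * point and context path are assumptions): it exposes the LS_SIZE bytes
 * of local store for read(2)/write(2)/mmap(2).
 *
 *	int fd = open("/spu/ctx/mem", O_RDWR);
 *	uint32_t word;
 *	pread(fd, &word, 4, 0);			// first word of local store
 *	void *ls = mmap(NULL, LS_SIZE, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);	// must be MAP_SHARED
 *
 * MAP_SHARED is mandatory because spufs_mem_mmap() rejects any vma
 * without VM_SHARED.
 */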

static int spufs_ps_fault(struct vm_area_struct *vma,
				    struct vm_fault *vmf,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
	int ret = 0;

	spu_context_nospu_trace(spufs_ps_fault__enter, ctx);

	if (offset >= ps_size)
		return VM_FAULT_SIGBUS;

	if (fatal_signal_pending(current))
		return VM_FAULT_SIGBUS;

	/*
	 * Because we release the mmap_sem, the context may be destroyed while
	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
	 * in the meantime.
	 */
	get_spu_context(ctx);

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return VM_FAULT_NOPAGE because the mappings may have
	 * changed.
	 */
	if (spu_acquire(ctx))
		goto refault;

	if (ctx->state == SPU_STATE_SAVED) {
		up_read(&current->mm->mmap_sem);
		spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
		ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
		down_read(&current->mm->mmap_sem);
	} else {
		area = ctx->spu->problem_phys + ps_offs;
		vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
					(area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
	}

	if (!ret)
		spu_release(ctx);

refault:
	put_spu_context(ctx);
	return VM_FAULT_NOPAGE;
}

#if SPUFS_MMAP_4K
static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
					   struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
}

static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.fault = spufs_cntl_mmap_fault,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_cntl_get(void *data, u64 *val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	*val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	simple_attr_release(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.llseek	= generic_file_llseek,
	.mmap = spufs_cntl_mmap,
};
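
/*
 * User-space sketch for "cntl" (illustrative): a read returns the SPU
 * status word printed with the "0x%08lx" format passed above, and a
 * written number is forwarded to runcntl_write() to control execution.
 *
 *	char buf[16];
 *	int fd = open("/spu/ctx/cntl", O_RDWR);
 *	read(fd, buf, sizeof(buf));	// e.g. "0x00000001"
 */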

static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	/* pre-check for file position: if we'd return EOF, there's no point
	 * causing a deschedule */
	if (*pos >= sizeof(ctx->csa.lscsa->gprs))
		return 0;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->gprs))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
					buffer, size);

	spu_release_saved(ctx);
	return size;
}

static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};

static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
			size_t size, loff_t * pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		 size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->fpcr))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
					buffer, size);

	spu_release_saved(ctx);
	return size;
}

static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};

/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		return count;

	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}

static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
	.llseek	= no_llseek,
};
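
/*
 * User-space sketch (path illustrative): "mbox" never blocks; when the
 * mailbox is empty the read fails with EAGAIN, as implemented above.
 *
 *	uint32_t data;
 *	int fd = open("/spu/ctx/mbox", O_RDONLY);
 *	if (read(fd, &data, 4) < 0 && errno == EAGAIN)
 *		;	// mailbox empty, try again later
 */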

static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
	.llseek = no_llseek,
};

/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}

/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
		if (count)
			goto out;
	}

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out_unlock;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
	.llseek = no_llseek,
};
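
/*
 * User-space sketch: in contrast to "mbox", a read from "ibox" blocks
 * until at least one entry arrives unless the file was opened with
 * O_NONBLOCK (then it fails with EAGAIN). Polling for POLLIN pairs
 * with spufs_ibox_poll() above:
 *
 *	struct pollfd pfd = { .fd = ibox_fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *	read(ibox_fd, &data, 4);
 */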

static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
	.llseek = no_llseek,
};

/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}

/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
		if (count)
			goto out;
	}


	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
	.llseek = no_llseek,
};
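
/*
 * User-space sketch: writes to "wbox" must be at least 4 bytes; the
 * first word blocks until queue space is free (or fails with EAGAIN
 * under O_NONBLOCK), and any further words are written only while the
 * queue keeps accepting them, mirroring the loop above.
 *
 *	uint32_t cmd = 0x1234;
 *	write(wbox_fd, &cmd, 4);	// may block without O_NONBLOCK
 */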

static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
	.llseek = no_llseek,
};

static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}

static int
spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.fault = spufs_signal1_mmap_fault,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
	.llseek = no_llseek,
};
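
/*
 * User-space sketch: writing a 32-bit value to "signal1" raises a
 * signal notification on the SPU; whether the value replaces or is
 * OR-ed into the register depends on the mode selected through the
 * "signal1_type" attribute defined further down.
 *
 *	uint32_t sig = 1;
 *	write(signal1_fd, &sig, 4);
 */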

static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
	.llseek = no_llseek,
};

static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}

#if SPUFS_MMAP_4K
static int
spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.fault = spufs_signal2_mmap_fault,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};

static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};

/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static int __##__get(void *data, u64 *val)				\
{									\
	struct spu_context *ctx = data;					\
	int ret = 0;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		ret = spu_acquire(ctx);					\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
		ret = spu_acquire_saved(ctx);				\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		*val = __get(ctx);					\
									\
	return 0;							\
}									\
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);

static int spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);


static int spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);

#if SPUFS_MMAP_4K
static int
spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mss_mmap_vmops = {
	.fault = spufs_mss_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
	.llseek  = no_llseek,
};

static int
spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.fault = spufs_psmap_mmap_fault,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
	.llseek  = no_llseek,
};


#if SPUFS_MMAP_4K
static int
spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.fault = spufs_mfc_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __func__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if any tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}

static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			/* XXX(hch): shouldn't we clear ret here? */
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
		if (ret)
			goto out;
	}
	spu_release(ctx);

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}

static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
		pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}
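
/*
 * Illustrative command that passes all of the checks above: lsa and ea
 * congruent modulo 16, size a multiple of 16 and at most 16 KiB, tag
 * below 16, class 0, and a plain get opcode.
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = 0x0,
 *		.ea   = 0x10000000ULL,
 *		.size = 0x4000,		// 16 * 1024 is the upper bound
 *		.tag  = 1,
 *		.cmd  = MFC_GET_CMD,
 *	};
 */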

static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;

	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (ret)
			goto out;
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}
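
/*
 * User-space sketch: a transfer is queued by writing exactly one
 * struct mfc_dma_command to "mfc"; completion is then awaited by
 * reading back a 4-byte tag status mask (blocking unless O_NONBLOCK),
 * as implemented by spufs_mfc_read() above.
 *
 *	write(mfc_fd, &cmd, sizeof(cmd));	// queue the DMA
 *	uint32_t tagstatus;
 *	read(mfc_fd, &tagstatus, 4);		// wait for the tag group
 */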
1795
1796static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
1797{
1798	struct spu_context *ctx = file->private_data;
1799	u32 free_elements, tagstatus;
1800	unsigned int mask;
1801
1802	poll_wait(file, &ctx->mfc_wq, wait);
1803
1804	/*
1805	 * For now keep this uninterruptible and also ignore the rule
1806	 * that poll should not sleep.  Will be fixed later.
1807	 */
1808	mutex_lock(&ctx->state_mutex);
1809	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
1810	free_elements = ctx->ops->get_mfc_free_elements(ctx);
1811	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1812	spu_release(ctx);
1813
1814	mask = 0;
1815	if (free_elements & 0xffff)
1816		mask |= POLLOUT | POLLWRNORM;
1817	if (tagstatus & ctx->tagwait)
1818		mask |= POLLIN | POLLRDNORM;
1819
1820	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
1821		free_elements, tagstatus, ctx->tagwait);
1822
1823	return mask;
1824}
1825
1826static int spufs_mfc_flush(struct file *file, fl_owner_t id)
1827{
1828	struct spu_context *ctx = file->private_data;
1829	int ret;
1830
1831	ret = spu_acquire(ctx);
1832	if (ret)
1833		goto out;
1834#if 0
1835/* this currently hangs */
1836	ret = spufs_wait(ctx->mfc_wq,
1837			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
1838	if (ret)
1839		goto out;
1840	ret = spufs_wait(ctx->mfc_wq,
1841			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
1842	if (ret)
1843		goto out;
1844#else
1845	ret = 0;
1846#endif
1847	spu_release(ctx);
1848out:
1849	return ret;
1850}
1851
1852static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1853{
1854	struct inode *inode = file_inode(file);
1855	int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
1856	if (!err) {
1857		mutex_lock(&inode->i_mutex);
1858		err = spufs_mfc_flush(file, NULL);
1859		mutex_unlock(&inode->i_mutex);
1860	}
1861	return err;
1862}
1863
1864static int spufs_mfc_fasync(int fd, struct file *file, int on)
1865{
1866	struct spu_context *ctx = file->private_data;
1867
1868	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
1869}
1870
1871static const struct file_operations spufs_mfc_fops = {
1872	.open	 = spufs_mfc_open,
1873	.release = spufs_mfc_release,
1874	.read	 = spufs_mfc_read,
1875	.write	 = spufs_mfc_write,
1876	.poll	 = spufs_mfc_poll,
1877	.flush	 = spufs_mfc_flush,
1878	.fsync	 = spufs_mfc_fsync,
1879	.fasync	 = spufs_mfc_fasync,
1880	.mmap	 = spufs_mfc_mmap,
1881	.llseek  = no_llseek,
1882};
1883
1884static int spufs_npc_set(void *data, u64 val)
1885{
1886	struct spu_context *ctx = data;
1887	int ret;
1888
1889	ret = spu_acquire(ctx);
1890	if (ret)
1891		return ret;
1892	ctx->ops->npc_write(ctx, val);
1893	spu_release(ctx);
1894
1895	return 0;
1896}
1897
1898static u64 spufs_npc_get(struct spu_context *ctx)
1899{
1900	return ctx->ops->npc_read(ctx);
1901}
1902DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
1903		       "0x%llx\n", SPU_ATTR_ACQUIRE);
1904
1905static int spufs_decr_set(void *data, u64 val)
1906{
1907	struct spu_context *ctx = data;
1908	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1909	int ret;
1910
1911	ret = spu_acquire_saved(ctx);
1912	if (ret)
1913		return ret;
1914	lscsa->decr.slot[0] = (u32) val;
1915	spu_release_saved(ctx);
1916
1917	return 0;
1918}
1919
1920static u64 spufs_decr_get(struct spu_context *ctx)
1921{
1922	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1923	return lscsa->decr.slot[0];
1924}
1925DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1926		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
1927
1928static int spufs_decr_status_set(void *data, u64 val)
1929{
1930	struct spu_context *ctx = data;
1931	int ret;
1932
1933	ret = spu_acquire_saved(ctx);
1934	if (ret)
1935		return ret;
1936	if (val)
1937		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
1938	else
1939		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
1940	spu_release_saved(ctx);
1941
1942	return 0;
1943}
1944
1945static u64 spufs_decr_status_get(struct spu_context *ctx)
1946{
1947	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
1948		return SPU_DECR_STATUS_RUNNING;
1949	else
1950		return 0;
1951}
1952DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1953		       spufs_decr_status_set, "0x%llx\n",
1954		       SPU_ATTR_ACQUIRE_SAVED);
1955
1956static int spufs_event_mask_set(void *data, u64 val)
1957{
1958	struct spu_context *ctx = data;
1959	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1960	int ret;
1961
1962	ret = spu_acquire_saved(ctx);
1963	if (ret)
1964		return ret;
1965	lscsa->event_mask.slot[0] = (u32) val;
1966	spu_release_saved(ctx);
1967
1968	return 0;
1969}
1970
1971static u64 spufs_event_mask_get(struct spu_context *ctx)
1972{
1973	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1974	return lscsa->event_mask.slot[0];
1975}
1976
1977DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1978		       spufs_event_mask_set, "0x%llx\n",
1979		       SPU_ATTR_ACQUIRE_SAVED);
1980
1981static u64 spufs_event_status_get(struct spu_context *ctx)
1982{
1983	struct spu_state *state = &ctx->csa;
1984	u64 stat;
1985	stat = state->spu_chnlcnt_RW[0];
1986	if (stat)
1987		return state->spu_chnldata_RW[0];
1988	return 0;
1989}
1990DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1991		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
1992
1993static int spufs_srr0_set(void *data, u64 val)
1994{
1995	struct spu_context *ctx = data;
1996	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1997	int ret;
1998
1999	ret = spu_acquire_saved(ctx);
2000	if (ret)
2001		return ret;
2002	lscsa->srr0.slot[0] = (u32) val;
2003	spu_release_saved(ctx);
2004
2005	return 0;
2006}
2007
2008static u64 spufs_srr0_get(struct spu_context *ctx)
2009{
2010	struct spu_lscsa *lscsa = ctx->csa.lscsa;
2011	return lscsa->srr0.slot[0];
2012}
2013DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
2014		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
2015
2016static u64 spufs_id_get(struct spu_context *ctx)
2017{
2018	u64 num;
2019
2020	if (ctx->state == SPU_STATE_RUNNABLE)
2021		num = ctx->spu->number;
2022	else
2023		num = (unsigned int)-1;
2024
2025	return num;
2026}
2027DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
2028		       SPU_ATTR_ACQUIRE)
2029
2030static u64 spufs_object_id_get(struct spu_context *ctx)
2031{
2032	/* FIXME: Should there really be no locking here? */
2033	return ctx->object_id;
2034}
2035
2036static int spufs_object_id_set(void *data, u64 id)
2037{
2038	struct spu_context *ctx = data;
2039	ctx->object_id = id;
2040
2041	return 0;
2042}
2043
2044DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
2045		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);
2046
2047static u64 spufs_lslr_get(struct spu_context *ctx)
2048{
2049	return ctx->csa.priv2.spu_lslr_RW;
2050}
2051DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
2052		       SPU_ATTR_ACQUIRE_SAVED);
2053
2054static int spufs_info_open(struct inode *inode, struct file *file)
2055{
2056	struct spufs_inode_info *i = SPUFS_I(inode);
2057	struct spu_context *ctx = i->i_ctx;
2058	file->private_data = ctx;
2059	return 0;
2060}
2061
2062static int spufs_caps_show(struct seq_file *s, void *private)
2063{
2064	struct spu_context *ctx = s->private;
2065
2066	if (!(ctx->flags & SPU_CREATE_NOSCHED))
2067		seq_puts(s, "sched\n");
2068	if (!(ctx->flags & SPU_CREATE_ISOLATE))
2069		seq_puts(s, "step\n");
2070	return 0;
2071}
2072
2073static int spufs_caps_open(struct inode *inode, struct file *file)
2074{
2075	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
2076}
2077
2078static const struct file_operations spufs_caps_fops = {
2079	.open		= spufs_caps_open,
2080	.read		= seq_read,
2081	.llseek		= seq_lseek,
2082	.release	= single_release,
2083};
2084
2085static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
2086			char __user *buf, size_t len, loff_t *pos)
2087{
2088	u32 data;
2089
2090	/* EOF if there's no entry in the mbox */
2091	if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
2092		return 0;
2093
2094	data = ctx->csa.prob.pu_mb_R;
2095
2096	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
2097}
2098
2099static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
2100				   size_t len, loff_t *pos)
2101{
2102	int ret;
2103	struct spu_context *ctx = file->private_data;
2104
2105	if (!access_ok(VERIFY_WRITE, buf, len))
2106		return -EFAULT;
2107
2108	ret = spu_acquire_saved(ctx);
2109	if (ret)
2110		return ret;
2111	spin_lock(&ctx->csa.register_lock);
2112	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
2113	spin_unlock(&ctx->csa.register_lock);
2114	spu_release_saved(ctx);
2115
2116	return ret;
2117}
2118
2119static const struct file_operations spufs_mbox_info_fops = {
2120	.open = spufs_info_open,
2121	.read = spufs_mbox_info_read,
2122	.llseek  = generic_file_llseek,
2123};
2124
2125static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
2126				char __user *buf, size_t len, loff_t *pos)
2127{
2128	u32 data;
2129
2130	/* EOF if there's no entry in the ibox */
2131	if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
2132		return 0;
2133
2134	data = ctx->csa.priv2.puint_mb_R;
2135
2136	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
2137}
2138
2139static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
2140				   size_t len, loff_t *pos)
2141{
2142	struct spu_context *ctx = file->private_data;
2143	int ret;
2144
2145	if (!access_ok(VERIFY_WRITE, buf, len))
2146		return -EFAULT;
2147
2148	ret = spu_acquire_saved(ctx);
2149	if (ret)
2150		return ret;
2151	spin_lock(&ctx->csa.register_lock);
2152	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
2153	spin_unlock(&ctx->csa.register_lock);
2154	spu_release_saved(ctx);
2155
2156	return ret;
2157}

static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++)
		data[i] = ctx->csa.spu_mailbox_data[i];

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 data[4];
	int ret, cnt;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	/* Snapshot under the lock; copy_to_user() may fault and sleep. */
	spin_lock(&ctx->csa.register_lock);
	cnt = 4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8);
	memcpy(data, ctx->csa.spu_mailbox_data, sizeof(data));
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}

static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};

static void ___spufs_dma_info_read(struct spu_context *ctx,
		struct spu_dma_info *info)
{
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info->dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info->dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}
}

static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;

	___spufs_dma_info_read(ctx, &info);
	return simple_read_from_buffer(buf, len, pos, &info, sizeof(info));
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_dma_info info;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	/* Snapshot under the lock; copy_to_user() may fault and sleep. */
	spin_lock(&ctx->csa.register_lock);
	___spufs_dma_info_read(ctx, &info);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return simple_read_from_buffer(buf, len, pos, &info, sizeof(info));
}

static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
	.llseek = no_llseek,
};

static void ___spufs_proxydma_info_read(struct spu_context *ctx,
		struct spu_proxydma_info *info)
{
	struct mfc_cq_sr *qp, *puqp;
	int i;

	info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info->proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}
}

static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;

	if (len < sizeof(info))
		return -EINVAL;

	___spufs_proxydma_info_read(ctx, &info);
	return simple_read_from_buffer(buf, len, pos, &info, sizeof(info));
}

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_proxydma_info info;
	int ret;

	if (len < sizeof(info))
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	/* Snapshot under the lock; copy_to_user() may fault and sleep. */
	spin_lock(&ctx->csa.register_lock);
	___spufs_proxydma_info_read(ctx, &info);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return simple_read_from_buffer(buf, len, pos, &info, sizeof(info));
}

static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
	.llseek = no_llseek,
};

static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
	.open		= spufs_tid_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};

static unsigned long long spufs_acct_time(struct spu_context *ctx,
		enum spu_utilization_state state)
{
	unsigned long long time = ctx->stats.times[state];

	/*
	 * In general, utilization statistics are updated by the controlling
	 * thread as the spu context moves through various well-defined
	 * state transitions.  But if the context is lazily loaded, its
	 * utilization statistics are not updated, because the controlling
	 * thread is not tightly coupled with the execution of the spu
	 * context.  In that case we calculate and apply the time delta from
	 * the last recorded state of the spu context.
	 */
	if (ctx->spu && ctx->stats.util_state == state)
		time += ktime_get_ns() - ctx->stats.tstamp;

	return time / NSEC_PER_MSEC;
}
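
/*
 * Worked example (values are illustrative): a context with 5,000,000 ns
 * accumulated in SPU_UTIL_USER that has been running in that same state
 * for another 2,000,000 ns since the last recorded timestamp reports
 * (5000000 + 2000000) / NSEC_PER_MSEC = 7 milliseconds.
 */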

static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
	unsigned long long slb_flts = ctx->stats.slb_flt;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		slb_flts += (ctx->spu->stats.slb_flt -
			     ctx->stats.slb_flt_base);
	}

	return slb_flts;
}

static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
	unsigned long long class2_intrs = ctx->stats.class2_intr;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		class2_intrs += (ctx->spu->stats.class2_intr -
				 ctx->stats.class2_intr_base);
	}

	return class2_intrs;
}
static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	seq_printf(s, "%s %llu %llu %llu %llu "
		      "%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}

static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_stat_fops = {
	.open		= spufs_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
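
/*
 * The stat file therefore reads as one line (values illustrative):
 *
 *	user 12 34 0 56 7 8 9 0 1 2 3 4
 *
 * i.e. the current utilization state, the four per-state times in
 * milliseconds, voluntary and involuntary context switch counts, SLB
 * fault, hash fault, minor fault and major fault counts, class 2
 * interrupt count, and the libassist count, in the order printed above.
 */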

static inline int spufs_switch_log_used(struct spu_context *ctx)
{
	return (ctx->switch_log->head - ctx->switch_log->tail) %
		SWITCH_LOG_BUFSIZE;
}

static inline int spufs_switch_log_avail(struct spu_context *ctx)
{
	return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
}
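
/*
 * A quick sanity check of the ring arithmetic (illustrative, and
 * assuming SWITCH_LOG_BUFSIZE is a power of two so that the unsigned
 * wrap-around in head - tail still reduces correctly): with a buffer
 * of 4 entries, head = 1 and tail = 3 give (1 - 3) % 4 = 2 entries
 * used and 4 - 2 = 2 available; head == tail always means "empty".
 */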

static int spufs_switch_log_open(struct inode *inode, struct file *file)
{
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	if (ctx->switch_log) {
		rc = -EBUSY;
		goto out;
	}

	ctx->switch_log = kmalloc(sizeof(struct switch_log) +
		SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
		GFP_KERNEL);

	if (!ctx->switch_log) {
		rc = -ENOMEM;
		goto out;
	}

	ctx->switch_log->head = ctx->switch_log->tail = 0;
	init_waitqueue_head(&ctx->switch_log->wait);
	rc = 0;

out:
	spu_release(ctx);
	return rc;
}

static int spufs_switch_log_release(struct inode *inode, struct file *file)
{
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	kfree(ctx->switch_log);
	ctx->switch_log = NULL;
	spu_release(ctx);

	return 0;
}

static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
{
	struct switch_log_entry *p;

	p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;

	return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
			(unsigned int) p->tstamp.tv_sec,
			(unsigned int) p->tstamp.tv_nsec,
			p->spu_id,
			(unsigned int) p->type,
			(unsigned int) p->val,
			(unsigned long long) p->timebase);
}

static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
			     size_t len, loff_t *ppos)
{
	struct inode *inode = file_inode(file);
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int error = 0, cnt = 0;

	if (!buf)
		return -EINVAL;

	error = spu_acquire(ctx);
	if (error)
		return error;

	while (cnt < len) {
		char tbuf[128];
		int width;

		if (spufs_switch_log_used(ctx) == 0) {
			if (cnt > 0) {
				/* If there's data ready to go, we can
				 * just return straight away */
				break;

			} else if (file->f_flags & O_NONBLOCK) {
				error = -EAGAIN;
				break;

			} else {
				/* spufs_wait will drop the mutex and
				 * re-acquire, but since we're in read(), the
				 * file cannot be _released (and so
				 * ctx->switch_log is stable).
				 */
				error = spufs_wait(ctx->switch_log->wait,
						spufs_switch_log_used(ctx) > 0);

				/* On error, spufs_wait returns without the
				 * state mutex held */
				if (error)
					return error;

				/* We may have had entries read from underneath
				 * us while we dropped the mutex in spufs_wait,
				 * so re-check */
				if (spufs_switch_log_used(ctx) == 0)
					continue;
			}
		}

		width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
		if (width < len - cnt)
			ctx->switch_log->tail =
				(ctx->switch_log->tail + 1) %
				 SWITCH_LOG_BUFSIZE;
		else
			/* If the record doesn't fit in the space that is
			 * left, return the partial buffer (so far) */
			break;

		if (copy_to_user(buf + cnt, tbuf, width)) {
			error = -EFAULT;
			break;
		}
		cnt += width;
	}

	spu_release(ctx);

	return cnt == 0 ? error : cnt;
}

static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
{
	struct inode *inode = file_inode(file);
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	unsigned int mask = 0;
	int rc;

	poll_wait(file, &ctx->switch_log->wait, wait);

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	if (spufs_switch_log_used(ctx) > 0)
		mask |= POLLIN;

	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_switch_log_fops = {
	.open		= spufs_switch_log_open,
	.read		= spufs_switch_log_read,
	.poll		= spufs_switch_log_poll,
	.release	= spufs_switch_log_release,
	.llseek		= no_llseek,
};

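/*
 * Illustrative userspace sketch (hypothetical path, not part of this
 * file): a reader blocks in poll(2) until a switch event is logged,
 * then drains complete text records with read(2):
 *
 *	char line[128];
 *	struct pollfd pfd = {
 *		.fd = open("/spu/ctx/switch_log", O_RDONLY),
 *		.events = POLLIN,
 *	};
 *	while (poll(&pfd, 1, -1) > 0) {
 *		ssize_t n = read(pfd.fd, line, sizeof(line));
 *		if (n > 0)
 *			fwrite(line, 1, n, stdout);
 *	}
 */
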
/**
 * spu_switch_log_notify - log a context switch event to a switch log reader
 * @spu:	the spu the context now runs on, or NULL if it is not loaded
 * @ctx:	the context being switched
 * @type:	event type
 * @val:	event-specific value
 *
 * Must be called with ctx->state_mutex held.
 */
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
		u32 type, u32 val)
{
	if (!ctx->switch_log)
		return;

	/*
	 * Keep one slot unused so that head can never catch up with
	 * tail on a write; head == tail remains unambiguously "empty".
	 * If the log is full, the event is silently dropped.
	 */
	if (spufs_switch_log_avail(ctx) > 1) {
		struct switch_log_entry *p;

		p = ctx->switch_log->log + ctx->switch_log->head;
		ktime_get_ts(&p->tstamp);
		p->timebase = get_tb();
		p->spu_id = spu ? spu->number : -1;
		p->type = type;
		p->val = val;

		ctx->switch_log->head =
			(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
	}

	wake_up(&ctx->switch_log->wait);
}

static int spufs_show_ctx(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	u64 mfc_control_RW;

	mutex_lock(&ctx->state_mutex);
	if (ctx->spu) {
		struct spu *spu = ctx->spu;
		struct spu_priv2 __iomem *priv2 = spu->priv2;

		spin_lock_irq(&spu->register_lock);
		mfc_control_RW = in_be64(&priv2->mfc_control_RW);
		spin_unlock_irq(&spu->register_lock);
	} else {
		struct spu_state *csa = &ctx->csa;

		mfc_control_RW = csa->priv2.mfc_control_RW;
	}

	seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
		" %c %llx %llx %llx %llx %x %x\n",
		ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
		ctx->flags,
		ctx->sched_flags,
		ctx->prio,
		ctx->time_slice,
		ctx->spu ? ctx->spu->number : -1,
		!list_empty(&ctx->rq) ? 'q' : ' ',
		ctx->csa.class_0_pending,
		ctx->csa.class_0_dar,
		ctx->csa.class_1_dsisr,
		mfc_control_RW,
		ctx->ops->runcntl_read(ctx),
		ctx->ops->status_read(ctx));

	mutex_unlock(&ctx->state_mutex);

	return 0;
}

static int spufs_ctx_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_ctx_fops = {
	.open           = spufs_ctx_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

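/*
 * A .ctx line therefore looks like this (values illustrative):
 *
 *	R flgs(0) sflgs(0) pri(120) ts(100) spu(03) q 0 0 0 0 1 1
 *
 * i.e. saved/runnable state, flags, scheduler flags, priority, time
 * slice, spu number, a 'q' when queued on the run queue, followed by
 * class 0 pending, class 0 DAR, class 1 DSISR, MFC control, run
 * control and status values.
 */
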
const struct spufs_tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "regs", &spufs_regs_fops,  0666, sizeof(struct spu_reg128[128]), },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
	{ "dma_info", &spufs_dma_info_fops, 0444,
		sizeof(struct spu_dma_info), },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444,
		sizeof(struct spu_proxydma_info), },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{ "switch_log", &spufs_switch_log_fops, 0444, },
	{},
};

const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};

const struct spufs_tree_descr spufs_dir_debug_contents[] = {
	{ ".ctx", &spufs_ctx_fops, 0444, },
	{},
};

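/*
 * Note on the fixed sizes below: the entries listed as 19 bytes are
 * attribute files whose formatted value fits in at most 19 characters;
 * for the "0x%llx\n" format that is exactly 2 + 16 + 1 bytes ("0x"
 * prefix, up to 16 hex digits, trailing newline).
 */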
const struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128]) },
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32) },
	{ "dma_info", __spufs_dma_info_read, NULL,
			sizeof(struct spu_dma_info) },
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info) },
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
	{ NULL },
};
