/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <engine/fifo.h>

#include <core/client.h>
#include <core/engctx.h>
#include <core/enum.h>
#include <core/handle.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

struct gf100_fifo_priv {
	struct nvkm_fifo base;

	struct work_struct fault;	/* engine recovery worker */
	u64 mask;			/* engines pending recovery */

	struct {
		struct nvkm_gpuobj *mem[2];	/* double-buffered runlist */
		int active;
		wait_queue_head_t wait;
	} runlist;

	struct {
		struct nvkm_gpuobj *mem;	/* one 0x1000-byte page per channel */
		struct nvkm_vma bar;
	} user;
	int spoon_nr;	/* number of PBDMA units */
};

struct gf100_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;
};

struct gf100_fifo_chan {
	struct nvkm_fifo_chan base;
	enum {
		STOPPED,
		RUNNING,
		KILLED
	} state;
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

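/*
 * Rebuild the runlist from all currently RUNNING channels and submit it.
 * Two runlist buffers are alternated, presumably so a new list can be
 * written while the hardware may still be processing the previous one.
 */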
static void
gf100_fifo_runlist_update(struct gf100_fifo_priv *priv)
{
	struct nvkm_bar *bar = nvkm_bar(priv);
	struct nvkm_gpuobj *cur;
	int i, p;

	mutex_lock(&nv_subdev(priv)->mutex);
	cur = priv->runlist.mem[priv->runlist.active];
	priv->runlist.active = !priv->runlist.active;

	for (i = 0, p = 0; i < 128; i++) {
		struct gf100_fifo_chan *chan = (void *)priv->base.channel[i];
		if (chan && chan->state == RUNNING) {
			nv_wo32(cur, p + 0, i);
			nv_wo32(cur, p + 4, 0x00000004);
			p += 8;
		}
	}
	bar->flush(bar);

	nv_wr32(priv, 0x002270, cur->addr >> 12);
	nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));

	if (wait_event_timeout(priv->runlist.wait,
			       !(nv_rd32(priv, 0x00227c) & 0x00100000),
			       msecs_to_jiffies(2000)) == 0)
		nv_error(priv, "runlist update timeout\n");
	mutex_unlock(&nv_subdev(priv)->mutex);
}

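/*
 * Write an engine's context pointer into the channel's instance block
 * (addr selects the per-engine slot), mapping the engine context into
 * the channel's VM first if that hasn't been done yet.
 */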
static int
gf100_fifo_context_attach(struct nvkm_object *parent,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gf100_fifo_base *base = (void *)parent->parent;
	struct nvkm_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	if (!ectx->vma.node) {
		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
					 NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
	bar->flush(bar);
	return 0;
}

static int
gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gf100_fifo_priv *priv = (void *)parent->engine;
	struct gf100_fifo_base *base = (void *)parent->parent;
	struct gf100_fifo_chan *chan = (void *)parent;
	u32 addr;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	/* kick the channel off the engine, and wait for it to be gone */
	nv_wr32(priv, 0x002634, chan->base.chid);
	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
		nv_error(priv, "channel %d [%s] kick timeout\n",
			 chan->base.chid, nvkm_client_name(chan));
		if (suspend)
			return -EBUSY;
	}

	nv_wo32(base, addr + 0x00, 0x00000000);
	nv_wo32(base, addr + 0x04, 0x00000000);
	bar->flush(bar);
	return 0;
}

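/*
 * Create a GPFIFO channel: clear this channel's page in the user-area
 * buffer, then fill in the RAMFC with the indirect (GPFIFO) buffer
 * location and size along with the usual initialisation values.
 */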
static int
gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	union {
		struct nv50_channel_gpfifo_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gf100_fifo_priv *priv = (void *)engine;
	struct gf100_fifo_base *base = (void *)parent;
	struct gf100_fifo_chan *chan;
	u64 usermem, ioffset, ilength;
	int ret, i;

	nv_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
				 "ioffset %016llx ilength %08x\n",
			 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
			 args->v0.ilength);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
				       priv->user.bar.offset, 0x1000,
				       args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_CE0) |
				       (1ULL << NVDEV_ENGINE_CE1) |
				       (1ULL << NVDEV_ENGINE_MSVLD) |
				       (1ULL << NVDEV_ENGINE_MSPDEC) |
				       (1ULL << NVDEV_ENGINE_MSPPP), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = gf100_fifo_context_attach;
	nv_parent(chan)->context_detach = gf100_fifo_context_detach;

	usermem = chan->base.chid * 0x1000;
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8); /* log2(GPFIFO entries) */

	for (i = 0; i < 0x1000; i += 4)
		nv_wo32(priv->user.mem, usermem + i, 0x00000000);

	nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
	nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
	nv_wo32(base, 0x10, 0x0000face);
	nv_wo32(base, 0x30, 0xfffff902);
	nv_wo32(base, 0x48, lower_32_bits(ioffset));
	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nv_wo32(base, 0x54, 0x00000002);
	nv_wo32(base, 0x84, 0x20400000);
	nv_wo32(base, 0x94, 0x30000001);
	nv_wo32(base, 0x9c, 0x00000100);
	nv_wo32(base, 0xa4, 0x1f1f1f1f);
	nv_wo32(base, 0xa8, 0x1f1f1f1f);
	nv_wo32(base, 0xac, 0x0000001f);
	nv_wo32(base, 0xb8, 0xf8000000);
	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
	bar->flush(bar);
	return 0;
}

static int
gf100_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
	struct gf100_fifo_priv *priv = (void *)object->engine;
	struct gf100_fifo_chan *chan = (void *)object;
	u32 chid = chan->base.chid;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);

	if (chan->state == STOPPED) {
		chan->state = RUNNING;
		nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001);
		gf100_fifo_runlist_update(priv);
	}

	return 0;
}

static void gf100_fifo_intr_engine(struct gf100_fifo_priv *priv);

static int
gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct gf100_fifo_priv *priv = (void *)object->engine;
	struct gf100_fifo_chan *chan = (void *)object;
	u32 chid = chan->base.chid;

	if (chan->state == RUNNING) {
		chan->state = STOPPED;
		nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
		gf100_fifo_runlist_update(priv);
	}

	gf100_fifo_intr_engine(priv);

	nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
	return nvkm_fifo_channel_fini(&chan->base, suspend);
}

static struct nvkm_ofuncs
gf100_fifo_ofuncs = {
	.ctor = gf100_fifo_chan_ctor,
	.dtor = _nvkm_fifo_channel_dtor,
	.init = gf100_fifo_chan_init,
	.fini = gf100_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
gf100_fifo_sclass[] = {
	{ FERMI_CHANNEL_GPFIFO, &gf100_fifo_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/

static int
gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	struct gf100_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_ZERO_ALLOC |
				       NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
			      &base->pgd);
	if (ret)
		return ret;

	/* page directory pointer, and address-space limit (40 bits) */
	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0208, 0xffffffff);
	nv_wo32(base, 0x020c, 0x000000ff);

	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}

static void
gf100_fifo_context_dtor(struct nvkm_object *object)
{
	struct gf100_fifo_base *base = (void *)object;
	nvkm_vm_ref(NULL, &base->vm, base->pgd);
	nvkm_gpuobj_ref(NULL, &base->pgd);
	nvkm_fifo_context_destroy(&base->base);
}

static struct nvkm_oclass
gf100_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xc0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_fifo_context_ctor,
		.dtor = gf100_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

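/* translate between NVDEV_ENGINE_* indices and PFIFO's engine unit numbers */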
static inline int
gf100_fifo_engidx(struct gf100_fifo_priv *priv, u32 engn)
{
	switch (engn) {
	case NVDEV_ENGINE_GR    : engn = 0; break;
	case NVDEV_ENGINE_MSVLD : engn = 1; break;
	case NVDEV_ENGINE_MSPPP : engn = 2; break;
	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
	case NVDEV_ENGINE_CE0   : engn = 4; break;
	case NVDEV_ENGINE_CE1   : engn = 5; break;
	default:
		return -1;
	}

	return engn;
}

static inline struct nvkm_engine *
gf100_fifo_engine(struct gf100_fifo_priv *priv, u32 engn)
{
	switch (engn) {
	case 0: engn = NVDEV_ENGINE_GR; break;
	case 1: engn = NVDEV_ENGINE_MSVLD; break;
	case 2: engn = NVDEV_ENGINE_MSPPP; break;
	case 3: engn = NVDEV_ENGINE_MSPDEC; break;
	case 4: engn = NVDEV_ENGINE_CE0; break;
	case 5: engn = NVDEV_ENGINE_CE1; break;
	default:
		return NULL;
	}

	return nvkm_engine(priv, engn);
}

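/*
 * Worker that performs the actual recovery: block the faulting engines
 * from being scheduled, reset (fini + init) each of them, then rebuild
 * and resubmit the runlist.
 */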
static void
gf100_fifo_recover_work(struct work_struct *work)
{
	struct gf100_fifo_priv *priv = container_of(work, typeof(*priv), fault);
	struct nvkm_object *engine;
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

	spin_lock_irqsave(&priv->base.lock, flags);
	mask = priv->mask;
	priv->mask = 0ULL;
	spin_unlock_irqrestore(&priv->base.lock, flags);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn))
		engm |= 1 << gf100_fifo_engidx(priv, engn);
	nv_mask(priv, 0x002630, engm, engm);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn)) {
		if ((engine = (void *)nvkm_engine(priv, engn))) {
			nv_ofuncs(engine)->fini(engine, false);
			WARN_ON(nv_ofuncs(engine)->init(engine));
		}
	}

	gf100_fifo_runlist_update(priv);
	nv_wr32(priv, 0x00262c, engm);
	nv_mask(priv, 0x002630, engm, 0x00000000);
}

static void
gf100_fifo_recover(struct gf100_fifo_priv *priv, struct nvkm_engine *engine,
		   struct gf100_fifo_chan *chan)
{
	u32 chid = chan->base.chid;
	unsigned long flags;

	nv_error(priv, "%s engine fault on channel %d, recovering...\n",
		       nv_subdev(engine)->name, chid);

	nv_mask(priv, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
	chan->state = KILLED;

	spin_lock_irqsave(&priv->base.lock, flags);
	priv->mask |= 1ULL << nv_engidx(engine);
	spin_unlock_irqrestore(&priv->base.lock, flags);
	schedule_work(&priv->fault);
}

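/*
 * Look up the software object (class 0x906e) bound on a channel and,
 * if one exists, hand it the method/data from a PBDMA interrupt.
 */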
static int
gf100_fifo_swmthd(struct gf100_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
{
	struct gf100_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&priv->base.lock, flags);
	if (likely(chid >= priv->base.min && chid <= priv->base.max))
		chan = (void *)priv->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e);
	if (likely(bind)) {
		if (!mthd || !nv_call(bind->object, mthd, data))
			ret = 0;
		nvkm_namedb_put(bind);
	}

out:
	spin_unlock_irqrestore(&priv->base.lock, flags);
	return ret;
}

static const struct nvkm_enum
gf100_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

static void
gf100_fifo_intr_sched_ctxsw(struct gf100_fifo_priv *priv)
{
	struct nvkm_engine *engine;
	struct gf100_fifo_chan *chan;
	u32 engn;

	for (engn = 0; engn < 6; engn++) {
		u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 save = (stat & 0x00100000); /* maybe? */
		u32 unk0 = (stat & 0x00040000);
		u32 unk1 = (stat & 0x00001000);
		u32 chid = (stat & 0x0000007f);
		(void)save;

		if (busy && unk0 && unk1) {
			if (!(chan = (void *)priv->base.channel[chid]))
				continue;
			if (!(engine = gf100_fifo_engine(priv, engn)))
				continue;
			gf100_fifo_recover(priv, engine, chan);
		}
	}
}

static void
gf100_fifo_intr_sched(struct gf100_fifo_priv *priv)
{
	u32 intr = nv_rd32(priv, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en;
	char enunk[6] = "";

	en = nvkm_enum_find(gf100_fifo_sched_reason, code);
	if (!en)
		snprintf(enunk, sizeof(enunk), "UNK%02x", code);

	nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk);

	switch (code) {
	case 0x0a:
		gf100_fifo_intr_sched_ctxsw(priv);
		break;
	default:
		break;
	}
}

static const struct nvkm_enum
gf100_fifo_fault_engine[] = {
	{ 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "PEEPHOLE", NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "PMSVLD", NULL, NVDEV_ENGINE_MSVLD },
	{ 0x11, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP },
	{ 0x13, "PCOUNTER" },
	{ 0x14, "PMSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
	{ 0x15, "PCE0", NULL, NVDEV_ENGINE_CE0 },
	{ 0x16, "PCE1", NULL, NVDEV_ENGINE_CE1 },
	{ 0x17, "PDAEMON" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_hubclient[] = {
	{ 0x01, "PCOPY0" },
	{ 0x02, "PCOPY1" },
	{ 0x04, "DISPATCH" },
	{ 0x05, "CTXCTL" },
	{ 0x06, "PFIFO" },
	{ 0x07, "BAR_READ" },
	{ 0x08, "BAR_WRITE" },
	{ 0x0b, "PVP" },
	{ 0x0c, "PMSPPP" },
	{ 0x0d, "PMSVLD" },
	{ 0x11, "PCOUNTER" },
	{ 0x12, "PDAEMON" },
	{ 0x14, "CCACHE" },
	{ 0x15, "CCACHE_POST" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_gpcclient[] = {
	{ 0x01, "TEX" },
	{ 0x0c, "ESETUP" },
	{ 0x0e, "CTXCTL" },
	{ 0x0f, "PROP" },
	{}
};

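/*
 * Decode and report an MMU fault, then attempt recovery of the channel
 * the faulting unit was operating on behalf of.
 */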
static void
gf100_fifo_intr_fault(struct gf100_fifo_priv *priv, int unit)
{
	u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
	u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
	u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
	u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	struct nvkm_object *engctx = NULL, *object;
	struct nvkm_engine *engine = NULL;
	const struct nvkm_enum *er, *eu, *ec;
	char erunk[6] = "";
	char euunk[6] = "";
	char ecunk[6] = "";
	char gpcid[3] = "";

	er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
	if (!er)
		snprintf(erunk, sizeof(erunk), "UNK%02X", reason);

	eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
	if (eu) {
		switch (eu->data2) {
		/* a dummy read-modify-write cycle appears to be enough to
		 * acknowledge faults from these units
		 */
		case NVDEV_SUBDEV_BAR:
			nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_engine(priv, eu->data2);
			if (engine)
				engctx = nvkm_engctx_get(engine, inst);
			break;
		}
	} else {
		snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
	}

	if (hub) {
		ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "%d", gpc);
	}

	if (!ec)
		snprintf(ecunk, sizeof(ecunk), "UNK%02x", client);

	nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on "
		       "channel 0x%010llx [%s]\n", write ? "write" : "read",
		 (u64)vahi << 32 | valo, er ? er->name : erunk,
		 eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
		 ec ? ec->name : ecunk, (u64)inst << 12,
		 nvkm_client_name(engctx));

	object = engctx;
	while (object) {
		switch (nv_mclass(object)) {
		case FERMI_CHANNEL_GPFIFO:
			gf100_fifo_recover(priv, engine, (void *)object);
			break;
		}
		object = object->parent;
	}

	nvkm_engctx_put(engctx);
}

static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
/*	{ 0x00008000, "" }	seen with null ib push */
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};

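/*
 * PBDMA interrupt: try to hand the offending method to the channel's
 * software object first; anything that remains is reported.
 */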
static void
gf100_fifo_intr_pbdma(struct gf100_fifo_priv *priv, int unit)
{
	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
	u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0x7f;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;

	if (stat & 0x00800000) {
		if (!gf100_fifo_swmthd(priv, chid, mthd, data))
			show &= ~0x00800000;
	}

	if (show) {
		nv_error(priv, "PBDMA%d:", unit);
		nvkm_bitfield_print(gf100_fifo_pbdma_intr, show);
		pr_cont("\n");
		nv_error(priv,
			 "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
			 unit, chid,
			 nvkm_client_name_for_fifo_chid(&priv->base, chid),
			 subc, mthd, data);
	}

	nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
	nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
}

static void
gf100_fifo_intr_runlist(struct gf100_fifo_priv *priv)
{
	u32 intr = nv_rd32(priv, 0x002a00);

	if (intr & 0x10000000) {
		wake_up(&priv->runlist.wait);
		nv_wr32(priv, 0x002a00, 0x10000000);
		intr &= ~0x10000000;
	}

	if (intr) {
		nv_error(priv, "RUNLIST 0x%08x\n", intr);
		nv_wr32(priv, 0x002a00, intr);
	}
}

static void
gf100_fifo_intr_engine_unit(struct gf100_fifo_priv *priv, int engn)
{
	u32 intr = nv_rd32(priv, 0x0025a8 + (engn * 0x04));
	u32 inte = nv_rd32(priv, 0x002628);
	u32 unkn;

	nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);

	for (unkn = 0; unkn < 8; unkn++) {
		u32 ints = (intr >> (unkn * 0x04)) & inte;
		if (ints & 0x1) {
			nvkm_fifo_uevent(&priv->base);
			ints &= ~1;
		}
		if (ints) {
			nv_error(priv, "ENGINE %d %d %01x\n", engn, unkn, ints);
			nv_mask(priv, 0x002628, ints, 0);
		}
	}
}

static void
gf100_fifo_intr_engine(struct gf100_fifo_priv *priv)
{
	u32 mask = nv_rd32(priv, 0x0025a4);
	while (mask) {
		u32 unit = __ffs(mask);
		gf100_fifo_intr_engine_unit(priv, unit);
		mask &= ~(1 << unit);
	}
}

static void
gf100_fifo_intr(struct nvkm_subdev *subdev)
{
	struct gf100_fifo_priv *priv = (void *)subdev;
	u32 mask = nv_rd32(priv, 0x002140);
	u32 stat = nv_rd32(priv, 0x002100) & mask;

	if (stat & 0x00000001) {
		u32 intr = nv_rd32(priv, 0x00252c);
		nv_warn(priv, "INTR 0x00000001: 0x%08x\n", intr);
		nv_wr32(priv, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000100) {
		gf100_fifo_intr_sched(priv);
		nv_wr32(priv, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		u32 intr = nv_rd32(priv, 0x00256c);
		nv_warn(priv, "INTR 0x00010000: 0x%08x\n", intr);
		nv_wr32(priv, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x01000000) {
		u32 intr = nv_rd32(priv, 0x00258c);
		nv_warn(priv, "INTR 0x01000000: 0x%08x\n", intr);
		nv_wr32(priv, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nv_rd32(priv, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_fault(priv, unit);
			nv_wr32(priv, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nv_rd32(priv, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_pbdma(priv, unit);
			nv_wr32(priv, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gf100_fifo_intr_runlist(priv);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		gf100_fifo_intr_engine(priv);
		stat &= ~0x80000000;
	}

	if (stat) {
		/* mask off unrecognised interrupts to prevent a storm */
		nv_error(priv, "INTR 0x%08x\n", stat);
		nv_mask(priv, 0x002140, stat, 0x00000000);
		nv_wr32(priv, 0x002100, stat);
	}
}

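/*
 * Enable/disable the engine interrupt (bit 31 of 0x002140) used to
 * deliver user events; it is handled by gf100_fifo_intr_engine() above.
 */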
static void
gf100_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
}

static void
gf100_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
}

static const struct nvkm_event_func
gf100_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = gf100_fifo_uevent_init,
	.fini = gf100_fifo_uevent_fini,
};

static int
gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
{
	struct gf100_fifo_priv *priv;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 127, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	INIT_WORK(&priv->fault, gf100_fifo_recover_work);

	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
			      &priv->runlist.mem[0]);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
			      &priv->runlist.mem[1]);
	if (ret)
		return ret;

	init_waitqueue_head(&priv->runlist.wait);

	/* one 0x1000-byte user page for each of the 128 channels */
	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 0x1000, 0x1000, 0,
			      &priv->user.mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
			      &priv->user.bar);
	if (ret)
		return ret;

	ret = nvkm_event_init(&gf100_fifo_uevent_func, 1, 1, &priv->base.uevent);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00000100;
	nv_subdev(priv)->intr = gf100_fifo_intr;
	nv_engine(priv)->cclass = &gf100_fifo_cclass;
	nv_engine(priv)->sclass = gf100_fifo_sclass;
	return 0;
}

static void
gf100_fifo_dtor(struct nvkm_object *object)
{
	struct gf100_fifo_priv *priv = (void *)object;

	nvkm_gpuobj_unmap(&priv->user.bar);
	nvkm_gpuobj_ref(NULL, &priv->user.mem);
	nvkm_gpuobj_ref(NULL, &priv->runlist.mem[0]);
	nvkm_gpuobj_ref(NULL, &priv->runlist.mem[1]);

	nvkm_fifo_destroy(&priv->base);
}

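/*
 * Bring up PFIFO: enable every engine/PBDMA unit, count the PBDMAs,
 * route engines to them, then point the hardware at the user-area BAR
 * mapping and enable interrupts.
 */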
static int
gf100_fifo_init(struct nvkm_object *object)
{
	struct gf100_fifo_priv *priv = (void *)object;
	int ret, i;

	ret = nvkm_fifo_init(&priv->base);
	if (ret)
		return ret;

	nv_wr32(priv, 0x000204, 0xffffffff);
	nv_wr32(priv, 0x002204, 0xffffffff);

	priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204));
	nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr);

	/* assign engines to PBDMAs */
	if (priv->spoon_nr >= 3) {
		nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */
		nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */
		nv_wr32(priv, 0x002210, ~(1 << 1)); /* PMSPPP */
		nv_wr32(priv, 0x002214, ~(1 << 1)); /* PMSVLD */
		nv_wr32(priv, 0x002218, ~(1 << 2)); /* PCE0 */
		nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */
	}

	/* PBDMA[n] */
	for (i = 0; i < priv->spoon_nr; i++) {
		nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	nv_mask(priv, 0x002200, 0x00000001, 0x00000001);
	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);

	nv_wr32(priv, 0x002100, 0xffffffff);
	nv_wr32(priv, 0x002140, 0x7fffffff);
	nv_wr32(priv, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
	return 0;
}

struct nvkm_oclass *
gf100_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0xc0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_fifo_ctor,
		.dtor = gf100_fifo_dtor,
		.init = gf100_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};