/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <core/engctx.h>
#include <core/engine.h>
#include <core/client.h>

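/* look for an existing context on <engine> created for <parent>.  must be
 * called with engine->lock held.  on a match, a reference is taken on the
 * context, *pobject is pointed at it and 1 is returned; 0 otherwise.
 */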
static inline int
nvkm_engctx_exists(struct nvkm_object *parent,
		   struct nvkm_engine *engine, void **pobject)
{
	struct nvkm_engctx *engctx;
	struct nvkm_object *parctx;

	list_for_each_entry(engctx, &engine->contexts, head) {
		parctx = nv_pclass(nv_object(engctx), NV_PARENT_CLASS);
		if (parctx == parent) {
			atomic_inc(&nv_object(engctx)->refcount);
			*pobject = engctx;
			return 1;
		}
	}

	return 0;
}

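/* common engine context constructor.  returns a reference to an existing
 * context for <parent> where one exists, otherwise allocates a new one,
 * backed by instance memory when size is non-zero, and adds it to the
 * engine's context list.
 */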
int
nvkm_engctx_create_(struct nvkm_object *parent, struct nvkm_object *engobj,
		    struct nvkm_oclass *oclass, struct nvkm_object *pargpu,
		    u32 size, u32 align, u32 flags, int length, void **pobject)
{
	struct nvkm_client *client = nvkm_client(parent);
	struct nvkm_engine *engine = nv_engine(engobj);
	struct nvkm_object *engctx;
	unsigned long save;
	int ret;

	/* check if this engine already has a context for the parent object,
	 * and reference it instead of creating a new one
	 */
	spin_lock_irqsave(&engine->lock, save);
	ret = nvkm_engctx_exists(parent, engine, pobject);
	spin_unlock_irqrestore(&engine->lock, save);
	if (ret)
		return ret;

	/* create the new context, supports creating both raw objects and
	 * objects backed by instance memory
	 */
	if (size) {
		ret = nvkm_gpuobj_create_(parent, engobj, oclass,
					  NV_ENGCTX_CLASS, pargpu, size,
					  align, flags, length, pobject);
	} else {
		ret = nvkm_object_create_(parent, engobj, oclass,
					  NV_ENGCTX_CLASS, length, pobject);
	}

	engctx = *pobject;
	if (ret)
		return ret;

	/* must take the lock again and re-check a context doesn't already
	 * exist (in case of a race) - the lock had to be dropped before as
	 * it's not possible to allocate the object with it held.
	 */
	spin_lock_irqsave(&engine->lock, save);
	ret = nvkm_engctx_exists(parent, engine, pobject);
	if (ret) {
		spin_unlock_irqrestore(&engine->lock, save);
		nvkm_object_ref(NULL, &engctx);
		return ret;
	}

	if (client->vm)
		atomic_inc(&client->vm->engref[nv_engidx(engine)]);
	list_add(&nv_engctx(engctx)->head, &engine->contexts);
	nv_engctx(engctx)->addr = ~0ULL;
	spin_unlock_irqrestore(&engine->lock, save);
	return 0;
}

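/* common engine context destructor.  unmaps the context's vma, unlinks it
 * from the engine's context list, drops the client vm's engine reference,
 * and destroys the backing gpuobj (or bare object, for raw contexts).
 */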
void
nvkm_engctx_destroy(struct nvkm_engctx *engctx)
{
	struct nvkm_engine *engine = engctx->gpuobj.object.engine;
	struct nvkm_client *client = nvkm_client(engctx);
	unsigned long save;

	nvkm_gpuobj_unmap(&engctx->vma);
	spin_lock_irqsave(&engine->lock, save);
	list_del(&engctx->head);
	spin_unlock_irqrestore(&engine->lock, save);

	if (client->vm)
		atomic_dec(&client->vm->engref[nv_engidx(engine)]);

	if (engctx->gpuobj.size)
		nvkm_gpuobj_destroy(&engctx->gpuobj);
	else
		nvkm_object_destroy(&engctx->gpuobj.object);
}

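/* initialise the context's backing gpuobj, then ask the closest parent of
 * NV_PARENT_CLASS (typically a fifo channel) to attach the context.  the
 * parent subdev's mutex is held across the attach callback.
 */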
int
nvkm_engctx_init(struct nvkm_engctx *engctx)
{
	struct nvkm_object *object = nv_object(engctx);
	struct nvkm_subdev *subdev = nv_subdev(object->engine);
	struct nvkm_object *parent;
	struct nvkm_subdev *pardev;
	int ret;

	ret = nvkm_gpuobj_init(&engctx->gpuobj);
	if (ret)
		return ret;

	parent = nv_pclass(object->parent, NV_PARENT_CLASS);
	pardev = nv_subdev(parent->engine);
	if (nv_parent(parent)->context_attach) {
		mutex_lock(&pardev->mutex);
		ret = nv_parent(parent)->context_attach(parent, object);
		mutex_unlock(&pardev->mutex);
	}

	if (ret) {
		nv_error(parent, "failed to attach %s context, %d\n",
			 subdev->name, ret);
		return ret;
	}

	nv_debug(parent, "attached %s context\n", subdev->name);
	return 0;
}

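/* counterpart to nvkm_engctx_init().  detaches the context from its parent
 * under the parent subdev's mutex before finalising the backing gpuobj.
 */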
int
nvkm_engctx_fini(struct nvkm_engctx *engctx, bool suspend)
{
	struct nvkm_object *object = nv_object(engctx);
	struct nvkm_subdev *subdev = nv_subdev(object->engine);
	struct nvkm_object *parent;
	struct nvkm_subdev *pardev;
	int ret = 0;

	parent = nv_pclass(object->parent, NV_PARENT_CLASS);
	pardev = nv_subdev(parent->engine);
	if (nv_parent(parent)->context_detach) {
		mutex_lock(&pardev->mutex);
		ret = nv_parent(parent)->context_detach(parent, suspend, object);
		mutex_unlock(&pardev->mutex);
	}

	if (ret) {
		nv_error(parent, "failed to detach %s context, %d\n",
			 subdev->name, ret);
		return ret;
	}

	nv_debug(parent, "detached %s context\n", subdev->name);
	return nvkm_gpuobj_fini(&engctx->gpuobj, suspend);
}

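/* default object method implementations, suitable for engines whose
 * contexts need nothing beyond a zeroed 256-byte instance allocation
 * with 256-byte alignment.
 */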
int
_nvkm_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		  struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	struct nvkm_engctx *engctx;
	int ret;

	ret = nvkm_engctx_create(parent, engine, oclass, NULL, 256, 256,
				 NVOBJ_FLAG_ZERO_ALLOC, &engctx);
	*pobject = nv_object(engctx);
	return ret;
}

void
_nvkm_engctx_dtor(struct nvkm_object *object)
{
	nvkm_engctx_destroy(nv_engctx(object));
}

int
_nvkm_engctx_init(struct nvkm_object *object)
{
	return nvkm_engctx_init(nv_engctx(object));
}

int
_nvkm_engctx_fini(struct nvkm_object *object, bool suspend)
{
	return nvkm_engctx_fini(nv_engctx(object), suspend);
}

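/* look up the context whose instance address matches <addr>.  returns with
 * engine->lock held on success (the saved irq flags are stashed in
 * engctx->save); the caller must release it via nvkm_engctx_put() and must
 * not sleep in between.  an illustrative caller (hypothetical, not from
 * this file) might look like:
 *
 *	object = nvkm_engctx_get(engine, inst);
 *	if (object) {
 *		... handle the event against this context ...
 *		nvkm_engctx_put(object);
 *	}
 */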
struct nvkm_object *
nvkm_engctx_get(struct nvkm_engine *engine, u64 addr)
{
	struct nvkm_engctx *engctx;
	unsigned long flags;

	spin_lock_irqsave(&engine->lock, flags);
	list_for_each_entry(engctx, &engine->contexts, head) {
		if (engctx->addr == addr) {
			engctx->save = flags;
			return nv_object(engctx);
		}
	}
	spin_unlock_irqrestore(&engine->lock, flags);
	return NULL;
}

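/* release the engine->lock taken by a successful nvkm_engctx_get(),
 * restoring the irq flags saved there.  safe to call with NULL.
 */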
void
nvkm_engctx_put(struct nvkm_object *object)
{
	if (object) {
		struct nvkm_engine *engine = nv_engine(object->engine);
		struct nvkm_engctx *engctx = nv_engctx(object);
		spin_unlock_irqrestore(&engine->lock, engctx->save);
	}
}