1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * Authors:
4 *	Eunchul Kim <chulspro.kim@samsung.com>
5 *	Jinyoung Jeon <jy0.jeon@samsung.com>
6 *	Sangmin Lee <lsmin.lee@samsung.com>
7 *
8 * This program is free software; you can redistribute  it and/or modify it
9 * under  the terms of  the GNU General  Public License as published by the
10 * Free Software Foundation;  either version 2 of the  License, or (at your
11 * option) any later version.
12 *
13 */
14#include <linux/kernel.h>
15#include <linux/platform_device.h>
16#include <linux/clk.h>
17#include <linux/pm_runtime.h>
18#include <plat/map-base.h>
19
20#include <drm/drmP.h>
21#include <drm/exynos_drm.h>
22#include "regs-gsc.h"
23#include "exynos_drm_drv.h"
24#include "exynos_drm_ipp.h"
25#include "exynos_drm_gsc.h"
26
/*
 * GSC stands for General SCaler and
 * supports image scaler/rotator and input/output DMA operations.
 * Input DMA reads image data from memory.
 * Output DMA writes image data to memory.
 * GSC supports image rotation and image effect functions.
 *
 * M2M operation: supports crop/scale/rotation/csc and so on.
 * Memory ----> GSC H/W ----> Memory.
 * Writeback operation: supports cloned screen with FIMD.
 * FIMD ----> GSC H/W ----> Memory.
 * Output operation: supports direct display using the local path.
 * Memory ----> GSC H/W ----> FIMD, Mixer.
 */
41
/*
 * TODO
 * 1. check suspend/resume api if needed.
 * 2. need to check the platform_device_id use case.
 * 3. check src/dst size width, height.
 * 4. add a check_prepare api for correct register setup.
 * 5. need to add the supported list to prop_list.
 * 6. check prescaler/scaler optimization.
 */
51
52#define GSC_MAX_DEVS	4
53#define GSC_MAX_SRC		4
54#define GSC_MAX_DST		16
55#define GSC_RESET_TIMEOUT	50
56#define GSC_BUF_STOP	1
57#define GSC_BUF_START	2
58#define GSC_REG_SZ		16
59#define GSC_WIDTH_ITU_709	1280
60#define GSC_SC_UP_MAX_RATIO		65536
61#define GSC_SC_DOWN_RATIO_7_8		74898
62#define GSC_SC_DOWN_RATIO_6_8		87381
63#define GSC_SC_DOWN_RATIO_5_8		104857
64#define GSC_SC_DOWN_RATIO_4_8		131072
65#define GSC_SC_DOWN_RATIO_3_8		174762
66#define GSC_SC_DOWN_RATIO_2_8		262144
67#define GSC_REFRESH_MIN	12
68#define GSC_REFRESH_MAX	60
69#define GSC_CROP_MAX	8192
70#define GSC_CROP_MIN	32
71#define GSC_SCALE_MAX	4224
72#define GSC_SCALE_MIN	32
73#define GSC_COEF_RATIO	7
74#define GSC_COEF_PHASE	9
75#define GSC_COEF_ATTR	16
76#define GSC_COEF_H_8T	8
77#define GSC_COEF_V_4T	4
78#define GSC_COEF_DEPTH	3
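/*
 * The GSC_SC_* values above are scaling-ratio thresholds in 16.16 fixed
 * point: 65536 is 1:1, 74898 ~ 8:7, 87381 ~ 8:6, 104857 ~ 8:5, 131072 is
 * 8:4, 174762 ~ 8:3 and 262144 is 8:2. gsc_set_h_coef()/gsc_set_v_coef()
 * use them to pick one of the seven polyphase filter banks below.
 */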
79
80#define get_gsc_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define get_ctx_from_ippdrv(ippdrv)	container_of(ippdrv,\
					struct gsc_context, ippdrv)
83#define gsc_read(offset)		readl(ctx->regs + (offset))
84#define gsc_write(cfg, offset)	writel(cfg, ctx->regs + (offset))
85
/*
 * A structure of scaler.
 *
 * @range: color range (narrow or wide).
 * @pre_shfactor: prescaler shift factor.
 * @pre_hratio: horizontal ratio of the prescaler.
 * @pre_vratio: vertical ratio of the prescaler.
 * @main_hratio: the main scaler's horizontal ratio.
 * @main_vratio: the main scaler's vertical ratio.
 */
96struct gsc_scaler {
97	bool	range;
98	u32	pre_shfactor;
99	u32	pre_hratio;
100	u32	pre_vratio;
101	unsigned long main_hratio;
102	unsigned long main_vratio;
103};
104
/*
 * A structure of scaler capability.
 *
 * Refer to section 49.2 (features) of the user manual.
 * @tile_w: tile mode or rotation width.
 * @tile_h: tile mode or rotation height.
 * @w: width for the other cases.
 * @h: height for the other cases.
 */
114struct gsc_capability {
115	/* tile or rotation */
116	u32	tile_w;
117	u32	tile_h;
118	/* other cases */
119	u32	w;
120	u32	h;
121};
122
/*
 * A structure of gsc context.
 *
 * @ippdrv: IPP driver data registered with the IPP core.
 * @regs_res: register resources.
 * @regs: memory mapped io registers.
 * @lock: locking of operations.
 * @gsc_clk: gsc gate clock.
 * @sc: scaler information.
 * @id: gsc id.
 * @irq: irq number.
 * @rotation: whether 90/270 degree source rotation is enabled.
 * @suspended: suspended state (clock gated).
 */
137struct gsc_context {
138	struct exynos_drm_ippdrv	ippdrv;
139	struct resource	*regs_res;
140	void __iomem	*regs;
141	struct mutex	lock;
142	struct clk	*gsc_clk;
143	struct gsc_scaler	sc;
144	int	id;
145	int	irq;
146	bool	rotation;
147	bool	suspended;
148};
149
150/* 8-tap Filter Coefficient */
151static const int h_coef_8t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_H_8T] = {
152	{	/* Ratio <= 65536 (~8:8) */
153		{  0,  0,   0, 128,   0,   0,  0,  0 },
154		{ -1,  2,  -6, 127,   7,  -2,  1,  0 },
155		{ -1,  4, -12, 125,  16,  -5,  1,  0 },
156		{ -1,  5, -15, 120,  25,  -8,  2,  0 },
157		{ -1,  6, -18, 114,  35, -10,  3, -1 },
158		{ -1,  6, -20, 107,  46, -13,  4, -1 },
159		{ -2,  7, -21,  99,  57, -16,  5, -1 },
160		{ -1,  6, -20,  89,  68, -18,  5, -1 },
161		{ -1,  6, -20,  79,  79, -20,  6, -1 },
162		{ -1,  5, -18,  68,  89, -20,  6, -1 },
163		{ -1,  5, -16,  57,  99, -21,  7, -2 },
164		{ -1,  4, -13,  46, 107, -20,  6, -1 },
165		{ -1,  3, -10,  35, 114, -18,  6, -1 },
166		{  0,  2,  -8,  25, 120, -15,  5, -1 },
167		{  0,  1,  -5,  16, 125, -12,  4, -1 },
168		{  0,  1,  -2,   7, 127,  -6,  2, -1 }
169	}, {	/* 65536 < Ratio <= 74898 (~8:7) */
170		{  3, -8,  14, 111,  13,  -8,  3,  0 },
171		{  2, -6,   7, 112,  21, -10,  3, -1 },
172		{  2, -4,   1, 110,  28, -12,  4, -1 },
173		{  1, -2,  -3, 106,  36, -13,  4, -1 },
174		{  1, -1,  -7, 103,  44, -15,  4, -1 },
175		{  1,  1, -11,  97,  53, -16,  4, -1 },
176		{  0,  2, -13,  91,  61, -16,  4, -1 },
177		{  0,  3, -15,  85,  69, -17,  4, -1 },
178		{  0,  3, -16,  77,  77, -16,  3,  0 },
179		{ -1,  4, -17,  69,  85, -15,  3,  0 },
180		{ -1,  4, -16,  61,  91, -13,  2,  0 },
181		{ -1,  4, -16,  53,  97, -11,  1,  1 },
182		{ -1,  4, -15,  44, 103,  -7, -1,  1 },
183		{ -1,  4, -13,  36, 106,  -3, -2,  1 },
184		{ -1,  4, -12,  28, 110,   1, -4,  2 },
185		{ -1,  3, -10,  21, 112,   7, -6,  2 }
186	}, {	/* 74898 < Ratio <= 87381 (~8:6) */
187		{ 2, -11,  25,  96, 25, -11,   2,  0 },
188		{ 2, -10,  19,  96, 31, -12,   2,  0 },
189		{ 2,  -9,  14,  94, 37, -12,   2,  0 },
190		{ 2,  -8,  10,  92, 43, -12,   1,  0 },
191		{ 2,  -7,   5,  90, 49, -12,   1,  0 },
192		{ 2,  -5,   1,  86, 55, -12,   0,  1 },
193		{ 2,  -4,  -2,  82, 61, -11,  -1,  1 },
194		{ 1,  -3,  -5,  77, 67,  -9,  -1,  1 },
195		{ 1,  -2,  -7,  72, 72,  -7,  -2,  1 },
196		{ 1,  -1,  -9,  67, 77,  -5,  -3,  1 },
197		{ 1,  -1, -11,  61, 82,  -2,  -4,  2 },
198		{ 1,   0, -12,  55, 86,   1,  -5,  2 },
199		{ 0,   1, -12,  49, 90,   5,  -7,  2 },
200		{ 0,   1, -12,  43, 92,  10,  -8,  2 },
201		{ 0,   2, -12,  37, 94,  14,  -9,  2 },
202		{ 0,   2, -12,  31, 96,  19, -10,  2 }
203	}, {	/* 87381 < Ratio <= 104857 (~8:5) */
204		{ -1,  -8, 33,  80, 33,  -8,  -1,  0 },
205		{ -1,  -8, 28,  80, 37,  -7,  -2,  1 },
206		{  0,  -8, 24,  79, 41,  -7,  -2,  1 },
207		{  0,  -8, 20,  78, 46,  -6,  -3,  1 },
208		{  0,  -8, 16,  76, 50,  -4,  -3,  1 },
209		{  0,  -7, 13,  74, 54,  -3,  -4,  1 },
210		{  1,  -7, 10,  71, 58,  -1,  -5,  1 },
211		{  1,  -6,  6,  68, 62,   1,  -5,  1 },
212		{  1,  -6,  4,  65, 65,   4,  -6,  1 },
213		{  1,  -5,  1,  62, 68,   6,  -6,  1 },
214		{  1,  -5, -1,  58, 71,  10,  -7,  1 },
215		{  1,  -4, -3,  54, 74,  13,  -7,  0 },
216		{  1,  -3, -4,  50, 76,  16,  -8,  0 },
217		{  1,  -3, -6,  46, 78,  20,  -8,  0 },
218		{  1,  -2, -7,  41, 79,  24,  -8,  0 },
219		{  1,  -2, -7,  37, 80,  28,  -8, -1 }
220	}, {	/* 104857 < Ratio <= 131072 (~8:4) */
221		{ -3,   0, 35,  64, 35,   0,  -3,  0 },
222		{ -3,  -1, 32,  64, 38,   1,  -3,  0 },
223		{ -2,  -2, 29,  63, 41,   2,  -3,  0 },
224		{ -2,  -3, 27,  63, 43,   4,  -4,  0 },
225		{ -2,  -3, 24,  61, 46,   6,  -4,  0 },
226		{ -2,  -3, 21,  60, 49,   7,  -4,  0 },
227		{ -1,  -4, 19,  59, 51,   9,  -4, -1 },
228		{ -1,  -4, 16,  57, 53,  12,  -4, -1 },
229		{ -1,  -4, 14,  55, 55,  14,  -4, -1 },
230		{ -1,  -4, 12,  53, 57,  16,  -4, -1 },
231		{ -1,  -4,  9,  51, 59,  19,  -4, -1 },
232		{  0,  -4,  7,  49, 60,  21,  -3, -2 },
233		{  0,  -4,  6,  46, 61,  24,  -3, -2 },
234		{  0,  -4,  4,  43, 63,  27,  -3, -2 },
235		{  0,  -3,  2,  41, 63,  29,  -2, -2 },
236		{  0,  -3,  1,  38, 64,  32,  -1, -3 }
237	}, {	/* 131072 < Ratio <= 174762 (~8:3) */
238		{ -1,   8, 33,  48, 33,   8,  -1,  0 },
239		{ -1,   7, 31,  49, 35,   9,  -1, -1 },
240		{ -1,   6, 30,  49, 36,  10,  -1, -1 },
241		{ -1,   5, 28,  48, 38,  12,  -1, -1 },
242		{ -1,   4, 26,  48, 39,  13,   0, -1 },
243		{ -1,   3, 24,  47, 41,  15,   0, -1 },
244		{ -1,   2, 23,  47, 42,  16,   0, -1 },
245		{ -1,   2, 21,  45, 43,  18,   1, -1 },
246		{ -1,   1, 19,  45, 45,  19,   1, -1 },
247		{ -1,   1, 18,  43, 45,  21,   2, -1 },
248		{ -1,   0, 16,  42, 47,  23,   2, -1 },
249		{ -1,   0, 15,  41, 47,  24,   3, -1 },
250		{ -1,   0, 13,  39, 48,  26,   4, -1 },
251		{ -1,  -1, 12,  38, 48,  28,   5, -1 },
252		{ -1,  -1, 10,  36, 49,  30,   6, -1 },
253		{ -1,  -1,  9,  35, 49,  31,   7, -1 }
254	}, {	/* 174762 < Ratio <= 262144 (~8:2) */
255		{  2,  13, 30,  38, 30,  13,   2,  0 },
256		{  2,  12, 29,  38, 30,  14,   3,  0 },
257		{  2,  11, 28,  38, 31,  15,   3,  0 },
258		{  2,  10, 26,  38, 32,  16,   4,  0 },
259		{  1,  10, 26,  37, 33,  17,   4,  0 },
260		{  1,   9, 24,  37, 34,  18,   5,  0 },
261		{  1,   8, 24,  37, 34,  19,   5,  0 },
262		{  1,   7, 22,  36, 35,  20,   6,  1 },
263		{  1,   6, 21,  36, 36,  21,   6,  1 },
264		{  1,   6, 20,  35, 36,  22,   7,  1 },
265		{  0,   5, 19,  34, 37,  24,   8,  1 },
266		{  0,   5, 18,  34, 37,  24,   9,  1 },
267		{  0,   4, 17,  33, 37,  26,  10,  1 },
268		{  0,   4, 16,  32, 38,  26,  10,  2 },
269		{  0,   3, 15,  31, 38,  28,  11,  2 },
270		{  0,   3, 14,  30, 38,  29,  12,  2 }
271	}
272};
273
274/* 4-tap Filter Coefficient */
275static const int v_coef_4t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_V_4T] = {
276	{	/* Ratio <= 65536 (~8:8) */
277		{  0, 128,   0,  0 },
278		{ -4, 127,   5,  0 },
279		{ -6, 124,  11, -1 },
280		{ -8, 118,  19, -1 },
281		{ -8, 111,  27, -2 },
282		{ -8, 102,  37, -3 },
283		{ -8,  92,  48, -4 },
284		{ -7,  81,  59, -5 },
285		{ -6,  70,  70, -6 },
286		{ -5,  59,  81, -7 },
287		{ -4,  48,  92, -8 },
288		{ -3,  37, 102, -8 },
289		{ -2,  27, 111, -8 },
290		{ -1,  19, 118, -8 },
291		{ -1,  11, 124, -6 },
292		{  0,   5, 127, -4 }
293	}, {	/* 65536 < Ratio <= 74898 (~8:7) */
294		{  8, 112,   8,  0 },
295		{  4, 111,  14, -1 },
296		{  1, 109,  20, -2 },
297		{ -2, 105,  27, -2 },
298		{ -3, 100,  34, -3 },
299		{ -5,  93,  43, -3 },
300		{ -5,  86,  51, -4 },
301		{ -5,  77,  60, -4 },
302		{ -5,  69,  69, -5 },
303		{ -4,  60,  77, -5 },
304		{ -4,  51,  86, -5 },
305		{ -3,  43,  93, -5 },
306		{ -3,  34, 100, -3 },
307		{ -2,  27, 105, -2 },
308		{ -2,  20, 109,  1 },
309		{ -1,  14, 111,  4 }
310	}, {	/* 74898 < Ratio <= 87381 (~8:6) */
311		{ 16,  96,  16,  0 },
312		{ 12,  97,  21, -2 },
313		{  8,  96,  26, -2 },
314		{  5,  93,  32, -2 },
315		{  2,  89,  39, -2 },
316		{  0,  84,  46, -2 },
317		{ -1,  79,  53, -3 },
318		{ -2,  73,  59, -2 },
319		{ -2,  66,  66, -2 },
320		{ -2,  59,  73, -2 },
321		{ -3,  53,  79, -1 },
322		{ -2,  46,  84,  0 },
323		{ -2,  39,  89,  2 },
324		{ -2,  32,  93,  5 },
325		{ -2,  26,  96,  8 },
326		{ -2,  21,  97, 12 }
327	}, {	/* 87381 < Ratio <= 104857 (~8:5) */
328		{ 22,  84,  22,  0 },
329		{ 18,  85,  26, -1 },
330		{ 14,  84,  31, -1 },
331		{ 11,  82,  36, -1 },
332		{  8,  79,  42, -1 },
333		{  6,  76,  47, -1 },
334		{  4,  72,  52,  0 },
335		{  2,  68,  58,  0 },
336		{  1,  63,  63,  1 },
337		{  0,  58,  68,  2 },
338		{  0,  52,  72,  4 },
339		{ -1,  47,  76,  6 },
340		{ -1,  42,  79,  8 },
341		{ -1,  36,  82, 11 },
342		{ -1,  31,  84, 14 },
343		{ -1,  26,  85, 18 }
344	}, {	/* 104857 < Ratio <= 131072 (~8:4) */
345		{ 26,  76,  26,  0 },
346		{ 22,  76,  30,  0 },
347		{ 19,  75,  34,  0 },
348		{ 16,  73,  38,  1 },
349		{ 13,  71,  43,  1 },
350		{ 10,  69,  47,  2 },
351		{  8,  66,  51,  3 },
352		{  6,  63,  55,  4 },
353		{  5,  59,  59,  5 },
354		{  4,  55,  63,  6 },
355		{  3,  51,  66,  8 },
356		{  2,  47,  69, 10 },
357		{  1,  43,  71, 13 },
358		{  1,  38,  73, 16 },
359		{  0,  34,  75, 19 },
360		{  0,  30,  76, 22 }
361	}, {	/* 131072 < Ratio <= 174762 (~8:3) */
362		{ 29,  70,  29,  0 },
363		{ 26,  68,  32,  2 },
364		{ 23,  67,  36,  2 },
365		{ 20,  66,  39,  3 },
366		{ 17,  65,  43,  3 },
367		{ 15,  63,  46,  4 },
368		{ 12,  61,  50,  5 },
369		{ 10,  58,  53,  7 },
370		{  8,  56,  56,  8 },
371		{  7,  53,  58, 10 },
372		{  5,  50,  61, 12 },
373		{  4,  46,  63, 15 },
374		{  3,  43,  65, 17 },
375		{  3,  39,  66, 20 },
376		{  2,  36,  67, 23 },
377		{  2,  32,  68, 26 }
378	}, {	/* 174762 < Ratio <= 262144 (~8:2) */
379		{ 32,  64,  32,  0 },
380		{ 28,  63,  34,  3 },
381		{ 25,  62,  37,  4 },
382		{ 22,  62,  40,  4 },
383		{ 19,  61,  43,  5 },
384		{ 17,  59,  46,  6 },
385		{ 15,  58,  48,  7 },
386		{ 13,  55,  51,  9 },
387		{ 11,  53,  53, 11 },
388		{  9,  51,  55, 13 },
389		{  7,  48,  58, 15 },
390		{  6,  46,  59, 17 },
391		{  5,  43,  61, 19 },
392		{  4,  40,  62, 22 },
393		{  4,  37,  62, 25 },
394		{  3,  34,  63, 28 }
395	}
396};
397
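/*
 * Trigger a software reset and poll (up to GSC_RESET_TIMEOUT iterations)
 * until the hardware clears GSC_SW_RESET, then mask all input/output
 * base address slots and select ping-pong buffer 0.
 */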
398static int gsc_sw_reset(struct gsc_context *ctx)
399{
400	u32 cfg;
401	int count = GSC_RESET_TIMEOUT;
402
403	/* s/w reset */
404	cfg = (GSC_SW_RESET_SRESET);
405	gsc_write(cfg, GSC_SW_RESET);
406
407	/* wait s/w reset complete */
408	while (count--) {
409		cfg = gsc_read(GSC_SW_RESET);
410		if (!cfg)
411			break;
412		usleep_range(1000, 2000);
413	}
414
415	if (cfg) {
416		DRM_ERROR("failed to reset gsc h/w.\n");
417		return -EBUSY;
418	}
419
420	/* reset sequence */
421	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
422	cfg |= (GSC_IN_BASE_ADDR_MASK |
423		GSC_IN_BASE_ADDR_PINGPONG(0));
424	gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
425	gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
426	gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
427
428	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
429	cfg |= (GSC_OUT_BASE_ADDR_MASK |
430		GSC_OUT_BASE_ADDR_PINGPONG(0));
431	gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
432	gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
433	gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
434
435	return 0;
436}
437
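/*
 * Route the FIMD writeback path to this GSC instance through the GSCBLK
 * system register. When disabling, only the pixel-async mask bit for this
 * instance is set; the routing bits are left untouched.
 */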
438static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
439{
440	u32 gscblk_cfg;
441
442	gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
443
444	if (enable)
445		gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
446				GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
447				GSC_BLK_SW_RESET_WB_DEST(ctx->id);
448	else
449		gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
450
451	writel(gscblk_cfg, SYSREG_GSCBLK_CFG1);
452}
453
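/*
 * Enable or disable the GSC interrupt and unmask the overflow and/or
 * frame-done sources. The GSC_IRQ_*_MASK bits are mask bits, so clearing
 * a bit enables the corresponding interrupt source.
 */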
454static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
455		bool overflow, bool done)
456{
457	u32 cfg;
458
	DRM_DEBUG_KMS("enable[%d]overflow[%d]done[%d]\n",
			enable, overflow, done);
461
462	cfg = gsc_read(GSC_IRQ);
463	cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
464
465	if (enable)
466		cfg |= GSC_IRQ_ENABLE;
467	else
468		cfg &= ~GSC_IRQ_ENABLE;
469
470	if (overflow)
471		cfg &= ~GSC_IRQ_OR_MASK;
472	else
473		cfg |= GSC_IRQ_OR_MASK;
474
475	if (done)
476		cfg &= ~GSC_IRQ_FRMDONE_MASK;
477	else
478		cfg |= GSC_IRQ_FRMDONE_MASK;
479
480	gsc_write(cfg, GSC_IRQ);
481}
482
483
484static int gsc_src_set_fmt(struct device *dev, u32 fmt)
485{
486	struct gsc_context *ctx = get_gsc_context(dev);
487	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
488	u32 cfg;
489
490	DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
491
492	cfg = gsc_read(GSC_IN_CON);
493	cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
494		 GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
495		 GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE |
496		 GSC_IN_CHROM_STRIDE_SEL_MASK | GSC_IN_RB_SWAP_MASK);
497
498	switch (fmt) {
499	case DRM_FORMAT_RGB565:
500		cfg |= GSC_IN_RGB565;
501		break;
502	case DRM_FORMAT_XRGB8888:
503		cfg |= GSC_IN_XRGB8888;
504		break;
505	case DRM_FORMAT_BGRX8888:
506		cfg |= (GSC_IN_XRGB8888 | GSC_IN_RB_SWAP);
507		break;
508	case DRM_FORMAT_YUYV:
509		cfg |= (GSC_IN_YUV422_1P |
510			GSC_IN_YUV422_1P_ORDER_LSB_Y |
511			GSC_IN_CHROMA_ORDER_CBCR);
512		break;
513	case DRM_FORMAT_YVYU:
514		cfg |= (GSC_IN_YUV422_1P |
515			GSC_IN_YUV422_1P_ORDER_LSB_Y |
516			GSC_IN_CHROMA_ORDER_CRCB);
517		break;
518	case DRM_FORMAT_UYVY:
519		cfg |= (GSC_IN_YUV422_1P |
520			GSC_IN_YUV422_1P_OEDER_LSB_C |
521			GSC_IN_CHROMA_ORDER_CBCR);
522		break;
523	case DRM_FORMAT_VYUY:
524		cfg |= (GSC_IN_YUV422_1P |
525			GSC_IN_YUV422_1P_OEDER_LSB_C |
526			GSC_IN_CHROMA_ORDER_CRCB);
527		break;
528	case DRM_FORMAT_NV21:
529	case DRM_FORMAT_NV61:
530		cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
531			GSC_IN_YUV420_2P);
532		break;
533	case DRM_FORMAT_YUV422:
534		cfg |= GSC_IN_YUV422_3P;
535		break;
536	case DRM_FORMAT_YUV420:
537	case DRM_FORMAT_YVU420:
538		cfg |= GSC_IN_YUV420_3P;
539		break;
540	case DRM_FORMAT_NV12:
541	case DRM_FORMAT_NV16:
542		cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
543			GSC_IN_YUV420_2P);
544		break;
545	default:
		dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
547		return -EINVAL;
548	}
549
550	gsc_write(cfg, GSC_IN_CON);
551
552	return 0;
553}
554
555static int gsc_src_set_transf(struct device *dev,
556		enum drm_exynos_degree degree,
557		enum drm_exynos_flip flip, bool *swap)
558{
559	struct gsc_context *ctx = get_gsc_context(dev);
560	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
561	u32 cfg;
562
563	DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
564
565	cfg = gsc_read(GSC_IN_CON);
566	cfg &= ~GSC_IN_ROT_MASK;
567
568	switch (degree) {
569	case EXYNOS_DRM_DEGREE_0:
570		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
571			cfg |= GSC_IN_ROT_XFLIP;
572		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
573			cfg |= GSC_IN_ROT_YFLIP;
574		break;
575	case EXYNOS_DRM_DEGREE_90:
576		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
577			cfg |= GSC_IN_ROT_90_XFLIP;
578		else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
579			cfg |= GSC_IN_ROT_90_YFLIP;
580		else
581			cfg |= GSC_IN_ROT_90;
582		break;
583	case EXYNOS_DRM_DEGREE_180:
584		cfg |= GSC_IN_ROT_180;
585		break;
586	case EXYNOS_DRM_DEGREE_270:
587		cfg |= GSC_IN_ROT_270;
588		break;
589	default:
		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
591		return -EINVAL;
592	}
593
594	gsc_write(cfg, GSC_IN_CON);
595
596	ctx->rotation = cfg &
597		(GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
598	*swap = ctx->rotation;
599
600	return 0;
601}
602
603static int gsc_src_set_size(struct device *dev, int swap,
604		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
605{
606	struct gsc_context *ctx = get_gsc_context(dev);
607	struct drm_exynos_pos img_pos = *pos;
608	struct gsc_scaler *sc = &ctx->sc;
609	u32 cfg;
610
611	DRM_DEBUG_KMS("swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
612		swap, pos->x, pos->y, pos->w, pos->h);
613
614	if (swap) {
615		img_pos.w = pos->h;
616		img_pos.h = pos->w;
617	}
618
619	/* pixel offset */
620	cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) |
621		GSC_SRCIMG_OFFSET_Y(img_pos.y));
622	gsc_write(cfg, GSC_SRCIMG_OFFSET);
623
624	/* cropped size */
625	cfg = (GSC_CROPPED_WIDTH(img_pos.w) |
626		GSC_CROPPED_HEIGHT(img_pos.h));
627	gsc_write(cfg, GSC_CROPPED_SIZE);
628
629	DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", sz->hsize, sz->vsize);
630
631	/* original size */
632	cfg = gsc_read(GSC_SRCIMG_SIZE);
633	cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
634		GSC_SRCIMG_WIDTH_MASK);
635
636	cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) |
637		GSC_SRCIMG_HEIGHT(sz->vsize));
638
639	gsc_write(cfg, GSC_SRCIMG_SIZE);
640
641	cfg = gsc_read(GSC_IN_CON);
642	cfg &= ~GSC_IN_RGB_TYPE_MASK;
643
644	DRM_DEBUG_KMS("width[%d]range[%d]\n", pos->w, sc->range);
645
646	if (pos->w >= GSC_WIDTH_ITU_709)
647		if (sc->range)
648			cfg |= GSC_IN_RGB_HD_WIDE;
649		else
650			cfg |= GSC_IN_RGB_HD_NARROW;
651	else
652		if (sc->range)
653			cfg |= GSC_IN_RGB_SD_WIDE;
654		else
655			cfg |= GSC_IN_RGB_SD_NARROW;
656
657	gsc_write(cfg, GSC_IN_CON);
658
659	return 0;
660}
661
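/*
 * Each bit of GSC_IN_BASE_ADDR_*_MASK masks one source buffer slot:
 * a cleared bit means the slot is enqueued and usable by the hardware,
 * a set bit means it is dequeued (masked out).
 */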
662static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
663		enum drm_exynos_ipp_buf_type buf_type)
664{
665	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
666	bool masked;
667	u32 cfg;
668	u32 mask = 0x00000001 << buf_id;
669
670	DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
671
672	/* mask register set */
673	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
674
675	switch (buf_type) {
676	case IPP_BUF_ENQUEUE:
677		masked = false;
678		break;
679	case IPP_BUF_DEQUEUE:
680		masked = true;
681		break;
682	default:
683		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
684		return -EINVAL;
685	}
686
687	/* sequence id */
688	cfg &= ~mask;
689	cfg |= masked << buf_id;
690	gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
691	gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
692	gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
693
694	return 0;
695}
696
697static int gsc_src_set_addr(struct device *dev,
698		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
699		enum drm_exynos_ipp_buf_type buf_type)
700{
701	struct gsc_context *ctx = get_gsc_context(dev);
702	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
703	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
704	struct drm_exynos_ipp_property *property;
705
706	if (!c_node) {
707		DRM_ERROR("failed to get c_node.\n");
708		return -EFAULT;
709	}
710
711	property = &c_node->property;
712
713	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n",
714		property->prop_id, buf_id, buf_type);
715
716	if (buf_id > GSC_MAX_SRC) {
		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
718		return -EINVAL;
719	}
720
721	/* address register set */
722	switch (buf_type) {
723	case IPP_BUF_ENQUEUE:
724		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
725			GSC_IN_BASE_ADDR_Y(buf_id));
726		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
727			GSC_IN_BASE_ADDR_CB(buf_id));
728		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
729			GSC_IN_BASE_ADDR_CR(buf_id));
730		break;
731	case IPP_BUF_DEQUEUE:
732		gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id));
733		gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id));
734		gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id));
735		break;
736	default:
737		/* bypass */
738		break;
739	}
740
741	return gsc_src_set_buf_seq(ctx, buf_id, buf_type);
742}
743
744static struct exynos_drm_ipp_ops gsc_src_ops = {
745	.set_fmt = gsc_src_set_fmt,
746	.set_transf = gsc_src_set_transf,
747	.set_size = gsc_src_set_size,
748	.set_addr = gsc_src_set_addr,
749};
750
751static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
752{
753	struct gsc_context *ctx = get_gsc_context(dev);
754	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
755	u32 cfg;
756
757	DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
758
759	cfg = gsc_read(GSC_OUT_CON);
760	cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
761		 GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
762		 GSC_OUT_CHROM_STRIDE_SEL_MASK | GSC_OUT_RB_SWAP_MASK |
763		 GSC_OUT_GLOBAL_ALPHA_MASK);
764
765	switch (fmt) {
766	case DRM_FORMAT_RGB565:
767		cfg |= GSC_OUT_RGB565;
768		break;
769	case DRM_FORMAT_XRGB8888:
770		cfg |= GSC_OUT_XRGB8888;
771		break;
772	case DRM_FORMAT_BGRX8888:
773		cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
774		break;
775	case DRM_FORMAT_YUYV:
776		cfg |= (GSC_OUT_YUV422_1P |
777			GSC_OUT_YUV422_1P_ORDER_LSB_Y |
778			GSC_OUT_CHROMA_ORDER_CBCR);
779		break;
780	case DRM_FORMAT_YVYU:
781		cfg |= (GSC_OUT_YUV422_1P |
782			GSC_OUT_YUV422_1P_ORDER_LSB_Y |
783			GSC_OUT_CHROMA_ORDER_CRCB);
784		break;
785	case DRM_FORMAT_UYVY:
786		cfg |= (GSC_OUT_YUV422_1P |
787			GSC_OUT_YUV422_1P_OEDER_LSB_C |
788			GSC_OUT_CHROMA_ORDER_CBCR);
789		break;
790	case DRM_FORMAT_VYUY:
791		cfg |= (GSC_OUT_YUV422_1P |
792			GSC_OUT_YUV422_1P_OEDER_LSB_C |
793			GSC_OUT_CHROMA_ORDER_CRCB);
794		break;
795	case DRM_FORMAT_NV21:
796	case DRM_FORMAT_NV61:
797		cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
798		break;
799	case DRM_FORMAT_YUV422:
800	case DRM_FORMAT_YUV420:
801	case DRM_FORMAT_YVU420:
802		cfg |= GSC_OUT_YUV420_3P;
803		break;
804	case DRM_FORMAT_NV12:
805	case DRM_FORMAT_NV16:
806		cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
807			GSC_OUT_YUV420_2P);
808		break;
809	default:
		dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
811		return -EINVAL;
812	}
813
814	gsc_write(cfg, GSC_OUT_CON);
815
816	return 0;
817}
818
819static int gsc_dst_set_transf(struct device *dev,
820		enum drm_exynos_degree degree,
821		enum drm_exynos_flip flip, bool *swap)
822{
823	struct gsc_context *ctx = get_gsc_context(dev);
824	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
825	u32 cfg;
826
827	DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
828
829	cfg = gsc_read(GSC_IN_CON);
830	cfg &= ~GSC_IN_ROT_MASK;
831
832	switch (degree) {
833	case EXYNOS_DRM_DEGREE_0:
834		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
835			cfg |= GSC_IN_ROT_XFLIP;
836		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
837			cfg |= GSC_IN_ROT_YFLIP;
838		break;
839	case EXYNOS_DRM_DEGREE_90:
840		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
841			cfg |= GSC_IN_ROT_90_XFLIP;
842		else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
843			cfg |= GSC_IN_ROT_90_YFLIP;
844		else
845			cfg |= GSC_IN_ROT_90;
846		break;
847	case EXYNOS_DRM_DEGREE_180:
848		cfg |= GSC_IN_ROT_180;
849		break;
850	case EXYNOS_DRM_DEGREE_270:
851		cfg |= GSC_IN_ROT_270;
852		break;
853	default:
		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
855		return -EINVAL;
856	}
857
858	gsc_write(cfg, GSC_IN_CON);
859
860	ctx->rotation = cfg &
861		(GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
862	*swap = ctx->rotation;
863
864	return 0;
865}
866
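/*
 * Pick a prescaler ratio of 1, 2 or 4 depending on how far the source
 * size exceeds the destination size; an overall downscale of 8x or more
 * is rejected.
 */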
867static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
868{
869	DRM_DEBUG_KMS("src[%d]dst[%d]\n", src, dst);
870
871	if (src >= dst * 8) {
872		DRM_ERROR("failed to make ratio and shift.\n");
873		return -EINVAL;
874	} else if (src >= dst * 4)
875		*ratio = 4;
876	else if (src >= dst * 2)
877		*ratio = 2;
878	else
879		*ratio = 1;
880
881	return 0;
882}
883
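/*
 * The prescaler shift factor written to GSC_PRESC_SHFACTOR is
 * log2(pre_hratio * pre_vratio).
 */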
884static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
885{
886	if (hratio == 4 && vratio == 4)
887		*shfactor = 4;
888	else if ((hratio == 4 && vratio == 2) ||
889		 (hratio == 2 && vratio == 4))
890		*shfactor = 3;
891	else if ((hratio == 4 && vratio == 1) ||
892		 (hratio == 1 && vratio == 4) ||
893		 (hratio == 2 && vratio == 2))
894		*shfactor = 2;
895	else if (hratio == 1 && vratio == 1)
896		*shfactor = 0;
897	else
898		*shfactor = 1;
899}
900
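/*
 * Compute the prescaler ratios and the 16.16 fixed point main scaler
 * ratios from the (rotation-adjusted) source and destination sizes and
 * program GSC_PRE_SCALE_RATIO.
 */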
901static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
902		struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
903{
904	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
905	u32 cfg;
906	u32 src_w, src_h, dst_w, dst_h;
907	int ret = 0;
908
909	src_w = src->w;
910	src_h = src->h;
911
912	if (ctx->rotation) {
913		dst_w = dst->h;
914		dst_h = dst->w;
915	} else {
916		dst_w = dst->w;
917		dst_h = dst->h;
918	}
919
920	ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
921	if (ret) {
922		dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
923		return ret;
924	}
925
926	ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
927	if (ret) {
928		dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
929		return ret;
930	}
931
932	DRM_DEBUG_KMS("pre_hratio[%d]pre_vratio[%d]\n",
933		sc->pre_hratio, sc->pre_vratio);
934
935	sc->main_hratio = (src_w << 16) / dst_w;
936	sc->main_vratio = (src_h << 16) / dst_h;
937
938	DRM_DEBUG_KMS("main_hratio[%ld]main_vratio[%ld]\n",
939		sc->main_hratio, sc->main_vratio);
940
941	gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
942		&sc->pre_shfactor);
943
944	DRM_DEBUG_KMS("pre_shfactor[%d]\n", sc->pre_shfactor);
945
946	cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
947		GSC_PRESC_H_RATIO(sc->pre_hratio) |
948		GSC_PRESC_V_RATIO(sc->pre_vratio));
949	gsc_write(cfg, GSC_PRE_SCALE_RATIO);
950
951	return ret;
952}
953
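/*
 * Select the horizontal 8-tap filter bank matching the main scaling
 * ratio and load its coefficients into the GSC_HCOEF registers.
 */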
954static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio)
955{
956	int i, j, k, sc_ratio;
957
958	if (main_hratio <= GSC_SC_UP_MAX_RATIO)
959		sc_ratio = 0;
960	else if (main_hratio <= GSC_SC_DOWN_RATIO_7_8)
961		sc_ratio = 1;
962	else if (main_hratio <= GSC_SC_DOWN_RATIO_6_8)
963		sc_ratio = 2;
964	else if (main_hratio <= GSC_SC_DOWN_RATIO_5_8)
965		sc_ratio = 3;
966	else if (main_hratio <= GSC_SC_DOWN_RATIO_4_8)
967		sc_ratio = 4;
968	else if (main_hratio <= GSC_SC_DOWN_RATIO_3_8)
969		sc_ratio = 5;
970	else
971		sc_ratio = 6;
972
973	for (i = 0; i < GSC_COEF_PHASE; i++)
974		for (j = 0; j < GSC_COEF_H_8T; j++)
975			for (k = 0; k < GSC_COEF_DEPTH; k++)
976				gsc_write(h_coef_8t[sc_ratio][i][j],
977					GSC_HCOEF(i, j, k));
978}
979
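/* Same as gsc_set_h_coef(), but for the vertical 4-tap filter. */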
980static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio)
981{
982	int i, j, k, sc_ratio;
983
984	if (main_vratio <= GSC_SC_UP_MAX_RATIO)
985		sc_ratio = 0;
986	else if (main_vratio <= GSC_SC_DOWN_RATIO_7_8)
987		sc_ratio = 1;
988	else if (main_vratio <= GSC_SC_DOWN_RATIO_6_8)
989		sc_ratio = 2;
990	else if (main_vratio <= GSC_SC_DOWN_RATIO_5_8)
991		sc_ratio = 3;
992	else if (main_vratio <= GSC_SC_DOWN_RATIO_4_8)
993		sc_ratio = 4;
994	else if (main_vratio <= GSC_SC_DOWN_RATIO_3_8)
995		sc_ratio = 5;
996	else
997		sc_ratio = 6;
998
999	for (i = 0; i < GSC_COEF_PHASE; i++)
1000		for (j = 0; j < GSC_COEF_V_4T; j++)
1001			for (k = 0; k < GSC_COEF_DEPTH; k++)
1002				gsc_write(v_coef_4t[sc_ratio][i][j],
1003					GSC_VCOEF(i, j, k));
1004}
1005
1006static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
1007{
1008	u32 cfg;
1009
1010	DRM_DEBUG_KMS("main_hratio[%ld]main_vratio[%ld]\n",
1011		sc->main_hratio, sc->main_vratio);
1012
1013	gsc_set_h_coef(ctx, sc->main_hratio);
1014	cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
1015	gsc_write(cfg, GSC_MAIN_H_RATIO);
1016
1017	gsc_set_v_coef(ctx, sc->main_vratio);
1018	cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
1019	gsc_write(cfg, GSC_MAIN_V_RATIO);
1020}
1021
1022static int gsc_dst_set_size(struct device *dev, int swap,
1023		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
1024{
1025	struct gsc_context *ctx = get_gsc_context(dev);
1026	struct drm_exynos_pos img_pos = *pos;
1027	struct gsc_scaler *sc = &ctx->sc;
1028	u32 cfg;
1029
1030	DRM_DEBUG_KMS("swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
1031		swap, pos->x, pos->y, pos->w, pos->h);
1032
1033	if (swap) {
1034		img_pos.w = pos->h;
1035		img_pos.h = pos->w;
1036	}
1037
1038	/* pixel offset */
1039	cfg = (GSC_DSTIMG_OFFSET_X(pos->x) |
1040		GSC_DSTIMG_OFFSET_Y(pos->y));
1041	gsc_write(cfg, GSC_DSTIMG_OFFSET);
1042
1043	/* scaled size */
1044	cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h));
1045	gsc_write(cfg, GSC_SCALED_SIZE);
1046
1047	DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", sz->hsize, sz->vsize);
1048
1049	/* original size */
1050	cfg = gsc_read(GSC_DSTIMG_SIZE);
1051	cfg &= ~(GSC_DSTIMG_HEIGHT_MASK |
1052		GSC_DSTIMG_WIDTH_MASK);
1053	cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) |
1054		GSC_DSTIMG_HEIGHT(sz->vsize));
1055	gsc_write(cfg, GSC_DSTIMG_SIZE);
1056
1057	cfg = gsc_read(GSC_OUT_CON);
1058	cfg &= ~GSC_OUT_RGB_TYPE_MASK;
1059
1060	DRM_DEBUG_KMS("width[%d]range[%d]\n", pos->w, sc->range);
1061
1062	if (pos->w >= GSC_WIDTH_ITU_709)
1063		if (sc->range)
1064			cfg |= GSC_OUT_RGB_HD_WIDE;
1065		else
1066			cfg |= GSC_OUT_RGB_HD_NARROW;
1067	else
1068		if (sc->range)
1069			cfg |= GSC_OUT_RGB_SD_WIDE;
1070		else
1071			cfg |= GSC_OUT_RGB_SD_NARROW;
1072
1073	gsc_write(cfg, GSC_OUT_CON);
1074
1075	return 0;
1076}
1077
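/*
 * Return the number of destination buffer slots that are currently
 * enqueued (i.e. not masked out in GSC_OUT_BASE_ADDR_Y_MASK).
 */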
1078static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
1079{
1080	u32 cfg, i, buf_num = GSC_REG_SZ;
1081	u32 mask = 0x00000001;
1082
1083	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
1084
1085	for (i = 0; i < GSC_REG_SZ; i++)
1086		if (cfg & (mask << i))
1087			buf_num--;
1088
1089	DRM_DEBUG_KMS("buf_num[%d]\n", buf_num);
1090
1091	return buf_num;
1092}
1093
1094static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
1095		enum drm_exynos_ipp_buf_type buf_type)
1096{
1097	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1098	bool masked;
1099	u32 cfg;
1100	u32 mask = 0x00000001 << buf_id;
1101	int ret = 0;
1102
1103	DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
1104
1105	mutex_lock(&ctx->lock);
1106
1107	/* mask register set */
1108	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
1109
1110	switch (buf_type) {
1111	case IPP_BUF_ENQUEUE:
1112		masked = false;
1113		break;
1114	case IPP_BUF_DEQUEUE:
1115		masked = true;
1116		break;
1117	default:
1118		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
		ret = -EINVAL;
1120		goto err_unlock;
1121	}
1122
1123	/* sequence id */
1124	cfg &= ~mask;
1125	cfg |= masked << buf_id;
1126	gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
1127	gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
1128	gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
1129
1130	/* interrupt enable */
1131	if (buf_type == IPP_BUF_ENQUEUE &&
1132	    gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
1133		gsc_handle_irq(ctx, true, false, true);
1134
1135	/* interrupt disable */
1136	if (buf_type == IPP_BUF_DEQUEUE &&
1137	    gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
1138		gsc_handle_irq(ctx, false, false, true);
1139
1140err_unlock:
1141	mutex_unlock(&ctx->lock);
1142	return ret;
1143}
1144
1145static int gsc_dst_set_addr(struct device *dev,
1146		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
1147		enum drm_exynos_ipp_buf_type buf_type)
1148{
1149	struct gsc_context *ctx = get_gsc_context(dev);
1150	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1151	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1152	struct drm_exynos_ipp_property *property;
1153
1154	if (!c_node) {
1155		DRM_ERROR("failed to get c_node.\n");
1156		return -EFAULT;
1157	}
1158
1159	property = &c_node->property;
1160
1161	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n",
1162		property->prop_id, buf_id, buf_type);
1163
1164	if (buf_id > GSC_MAX_DST) {
		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
1166		return -EINVAL;
1167	}
1168
1169	/* address register set */
1170	switch (buf_type) {
1171	case IPP_BUF_ENQUEUE:
1172		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
1173			GSC_OUT_BASE_ADDR_Y(buf_id));
1174		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
1175			GSC_OUT_BASE_ADDR_CB(buf_id));
1176		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
1177			GSC_OUT_BASE_ADDR_CR(buf_id));
1178		break;
1179	case IPP_BUF_DEQUEUE:
1180		gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id));
1181		gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id));
1182		gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id));
1183		break;
1184	default:
1185		/* bypass */
1186		break;
1187	}
1188
1189	return gsc_dst_set_buf_seq(ctx, buf_id, buf_type);
1190}
1191
1192static struct exynos_drm_ipp_ops gsc_dst_ops = {
1193	.set_fmt = gsc_dst_set_fmt,
1194	.set_transf = gsc_dst_set_transf,
1195	.set_size = gsc_dst_set_size,
1196	.set_addr = gsc_dst_set_addr,
1197};
1198
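/* Gate or ungate the GSC clock and track the suspended state. */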
1199static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
1200{
1201	DRM_DEBUG_KMS("enable[%d]\n", enable);
1202
1203	if (enable) {
1204		clk_enable(ctx->gsc_clk);
1205		ctx->suspended = false;
1206	} else {
1207		clk_disable(ctx->gsc_clk);
1208		ctx->suspended = true;
1209	}
1210
1211	return 0;
1212}
1213
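/*
 * Find the source buffer the hardware has just consumed: starting from
 * the current hardware index, take the first slot that is still enqueued
 * and dequeue it, returning its buffer id.
 */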
1214static int gsc_get_src_buf_index(struct gsc_context *ctx)
1215{
1216	u32 cfg, curr_index, i;
1217	u32 buf_id = GSC_MAX_SRC;
1218	int ret;
1219
1220	DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
1221
1222	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
1223	curr_index = GSC_IN_CURR_GET_INDEX(cfg);
1224
1225	for (i = curr_index; i < GSC_MAX_SRC; i++) {
1226		if (!((cfg >> i) & 0x1)) {
1227			buf_id = i;
1228			break;
1229		}
1230	}
1231
1232	if (buf_id == GSC_MAX_SRC) {
1233		DRM_ERROR("failed to get in buffer index.\n");
1234		return -EINVAL;
1235	}
1236
1237	ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
1238	if (ret < 0) {
1239		DRM_ERROR("failed to dequeue.\n");
1240		return ret;
1241	}
1242
1243	DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
1244		curr_index, buf_id);
1245
1246	return buf_id;
1247}
1248
1249static int gsc_get_dst_buf_index(struct gsc_context *ctx)
1250{
1251	u32 cfg, curr_index, i;
1252	u32 buf_id = GSC_MAX_DST;
1253	int ret;
1254
1255	DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
1256
1257	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
1258	curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
1259
1260	for (i = curr_index; i < GSC_MAX_DST; i++) {
1261		if (!((cfg >> i) & 0x1)) {
1262			buf_id = i;
1263			break;
1264		}
1265	}
1266
1267	if (buf_id == GSC_MAX_DST) {
1268		DRM_ERROR("failed to get out buffer index.\n");
1269		return -EINVAL;
1270	}
1271
1272	ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
1273	if (ret < 0) {
1274		DRM_ERROR("failed to dequeue.\n");
1275		return ret;
1276	}
1277
1278	DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
1279		curr_index, buf_id);
1280
1281	return buf_id;
1282}
1283
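/*
 * Threaded interrupt handler. An overflow is only reported; on frame
 * done the just-completed source and destination buffer ids are looked
 * up, dequeued and handed to the IPP core through the command node's
 * event work.
 */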
1284static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
1285{
1286	struct gsc_context *ctx = dev_id;
1287	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1288	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1289	struct drm_exynos_ipp_event_work *event_work =
1290		c_node->event_work;
1291	u32 status;
1292	int buf_id[EXYNOS_DRM_OPS_MAX];
1293
1294	DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
1295
1296	status = gsc_read(GSC_IRQ);
1297	if (status & GSC_IRQ_STATUS_OR_IRQ) {
		dev_err(ippdrv->dev, "overflow occurred at %d, status 0x%x.\n",
			ctx->id, status);
1300		return IRQ_NONE;
1301	}
1302
1303	if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
		dev_dbg(ippdrv->dev, "frame done at %d, status 0x%x.\n",
			ctx->id, status);
1306
1307		buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
1308		if (buf_id[EXYNOS_DRM_OPS_SRC] < 0)
1309			return IRQ_HANDLED;
1310
1311		buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx);
1312		if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
1313			return IRQ_HANDLED;
1314
1315		DRM_DEBUG_KMS("buf_id_src[%d]buf_id_dst[%d]\n",
1316			buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);
1317
1318		event_work->ippdrv = ippdrv;
1319		event_work->buf_id[EXYNOS_DRM_OPS_SRC] =
1320			buf_id[EXYNOS_DRM_OPS_SRC];
1321		event_work->buf_id[EXYNOS_DRM_OPS_DST] =
1322			buf_id[EXYNOS_DRM_OPS_DST];
1323		queue_work(ippdrv->event_workq, &event_work->work);
1324	}
1325
1326	return IRQ_HANDLED;
1327}
1328
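/*
 * Advertise the GSC capabilities (writeback, flip/rotation, csc and the
 * crop/scale limits) to the IPP core through the property list.
 */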
1329static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1330{
1331	struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list;
1332
1333	prop_list->version = 1;
1334	prop_list->writeback = 1;
1335	prop_list->refresh_min = GSC_REFRESH_MIN;
1336	prop_list->refresh_max = GSC_REFRESH_MAX;
1337	prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
1338				(1 << EXYNOS_DRM_FLIP_HORIZONTAL);
1339	prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
1340				(1 << EXYNOS_DRM_DEGREE_90) |
1341				(1 << EXYNOS_DRM_DEGREE_180) |
1342				(1 << EXYNOS_DRM_DEGREE_270);
1343	prop_list->csc = 1;
1344	prop_list->crop = 1;
1345	prop_list->crop_max.hsize = GSC_CROP_MAX;
1346	prop_list->crop_max.vsize = GSC_CROP_MAX;
1347	prop_list->crop_min.hsize = GSC_CROP_MIN;
1348	prop_list->crop_min.vsize = GSC_CROP_MIN;
1349	prop_list->scale = 1;
1350	prop_list->scale_max.hsize = GSC_SCALE_MAX;
1351	prop_list->scale_max.vsize = GSC_SCALE_MAX;
1352	prop_list->scale_min.hsize = GSC_SCALE_MIN;
1353	prop_list->scale_min.vsize = GSC_SCALE_MIN;
1354
1355	return 0;
1356}
1357
1358static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
1359{
1360	switch (flip) {
1361	case EXYNOS_DRM_FLIP_NONE:
1362	case EXYNOS_DRM_FLIP_VERTICAL:
1363	case EXYNOS_DRM_FLIP_HORIZONTAL:
1364	case EXYNOS_DRM_FLIP_BOTH:
1365		return true;
1366	default:
1367		DRM_DEBUG_KMS("invalid flip\n");
1368		return false;
1369	}
1370}
1371
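/*
 * Validate a requested property against the advertised limits: flip and
 * rotation values, buffer bounds, and the crop (source) and scale
 * (destination) ranges, taking 90/270 degree swaps into account.
 */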
1372static int gsc_ippdrv_check_property(struct device *dev,
1373		struct drm_exynos_ipp_property *property)
1374{
1375	struct gsc_context *ctx = get_gsc_context(dev);
1376	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1377	struct drm_exynos_ipp_prop_list *pp = &ippdrv->prop_list;
1378	struct drm_exynos_ipp_config *config;
1379	struct drm_exynos_pos *pos;
1380	struct drm_exynos_sz *sz;
1381	bool swap;
1382	int i;
1383
1384	for_each_ipp_ops(i) {
1385		if ((i == EXYNOS_DRM_OPS_SRC) &&
1386			(property->cmd == IPP_CMD_WB))
1387			continue;
1388
1389		config = &property->config[i];
1390		pos = &config->pos;
1391		sz = &config->sz;
1392
1393		/* check for flip */
1394		if (!gsc_check_drm_flip(config->flip)) {
1395			DRM_ERROR("invalid flip.\n");
1396			goto err_property;
1397		}
1398
1399		/* check for degree */
1400		switch (config->degree) {
1401		case EXYNOS_DRM_DEGREE_90:
1402		case EXYNOS_DRM_DEGREE_270:
1403			swap = true;
1404			break;
1405		case EXYNOS_DRM_DEGREE_0:
1406		case EXYNOS_DRM_DEGREE_180:
1407			swap = false;
1408			break;
1409		default:
1410			DRM_ERROR("invalid degree.\n");
1411			goto err_property;
1412		}
1413
1414		/* check for buffer bound */
1415		if ((pos->x + pos->w > sz->hsize) ||
1416			(pos->y + pos->h > sz->vsize)) {
1417			DRM_ERROR("out of buf bound.\n");
1418			goto err_property;
1419		}
1420
1421		/* check for crop */
1422		if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
1423			if (swap) {
1424				if ((pos->h < pp->crop_min.hsize) ||
1425					(sz->vsize > pp->crop_max.hsize) ||
1426					(pos->w < pp->crop_min.vsize) ||
1427					(sz->hsize > pp->crop_max.vsize)) {
1428					DRM_ERROR("out of crop size.\n");
1429					goto err_property;
1430				}
1431			} else {
1432				if ((pos->w < pp->crop_min.hsize) ||
1433					(sz->hsize > pp->crop_max.hsize) ||
1434					(pos->h < pp->crop_min.vsize) ||
1435					(sz->vsize > pp->crop_max.vsize)) {
1436					DRM_ERROR("out of crop size.\n");
1437					goto err_property;
1438				}
1439			}
1440		}
1441
1442		/* check for scale */
1443		if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
1444			if (swap) {
1445				if ((pos->h < pp->scale_min.hsize) ||
1446					(sz->vsize > pp->scale_max.hsize) ||
1447					(pos->w < pp->scale_min.vsize) ||
1448					(sz->hsize > pp->scale_max.vsize)) {
1449					DRM_ERROR("out of scale size.\n");
1450					goto err_property;
1451				}
1452			} else {
1453				if ((pos->w < pp->scale_min.hsize) ||
1454					(sz->hsize > pp->scale_max.hsize) ||
1455					(pos->h < pp->scale_min.vsize) ||
1456					(sz->vsize > pp->scale_max.vsize)) {
1457					DRM_ERROR("out of scale size.\n");
1458					goto err_property;
1459				}
1460			}
1461		}
1462	}
1463
1464	return 0;
1465
1466err_property:
1467	for_each_ipp_ops(i) {
1468		if ((i == EXYNOS_DRM_OPS_SRC) &&
1469			(property->cmd == IPP_CMD_WB))
1470			continue;
1471
1472		config = &property->config[i];
1473		pos = &config->pos;
1474		sz = &config->sz;
1475
1476		DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
1477			i ? "dst" : "src", config->flip, config->degree,
1478			pos->x, pos->y, pos->w, pos->h,
1479			sz->hsize, sz->vsize);
1480	}
1481
1482	return -EINVAL;
1483}
1484
1485
1486static int gsc_ippdrv_reset(struct device *dev)
1487{
1488	struct gsc_context *ctx = get_gsc_context(dev);
1489	struct gsc_scaler *sc = &ctx->sc;
1490	int ret;
1491
1492	/* reset h/w block */
1493	ret = gsc_sw_reset(ctx);
1494	if (ret < 0) {
1495		dev_err(dev, "failed to reset hardware.\n");
1496		return ret;
1497	}
1498
1499	/* scaler setting */
1500	memset(&ctx->sc, 0x0, sizeof(ctx->sc));
1501	sc->range = true;
1502
1503	return 0;
1504}
1505
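/*
 * Start an operation: set up the input/output data paths for the given
 * command (one-shot memory-to-memory, FIMD writeback or output), program
 * the prescaler and main scaler, then set GSC_ENABLE_ON.
 */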
1506static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1507{
1508	struct gsc_context *ctx = get_gsc_context(dev);
1509	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1510	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1511	struct drm_exynos_ipp_property *property;
1512	struct drm_exynos_ipp_config *config;
1513	struct drm_exynos_pos	img_pos[EXYNOS_DRM_OPS_MAX];
1514	struct drm_exynos_ipp_set_wb set_wb;
1515	u32 cfg;
1516	int ret, i;
1517
1518	DRM_DEBUG_KMS("cmd[%d]\n", cmd);
1519
1520	if (!c_node) {
1521		DRM_ERROR("failed to get c_node.\n");
1522		return -EINVAL;
1523	}
1524
1525	property = &c_node->property;
1526
1527	gsc_handle_irq(ctx, true, false, true);
1528
1529	for_each_ipp_ops(i) {
1530		config = &property->config[i];
1531		img_pos[i] = config->pos;
1532	}
1533
1534	switch (cmd) {
1535	case IPP_CMD_M2M:
1536		/* enable one shot */
1537		cfg = gsc_read(GSC_ENABLE);
1538		cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
1539			GSC_ENABLE_CLK_GATE_MODE_MASK);
1540		cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
1541		gsc_write(cfg, GSC_ENABLE);
1542
1543		/* src dma memory */
1544		cfg = gsc_read(GSC_IN_CON);
1545		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1546		cfg |= GSC_IN_PATH_MEMORY;
1547		gsc_write(cfg, GSC_IN_CON);
1548
1549		/* dst dma memory */
1550		cfg = gsc_read(GSC_OUT_CON);
1551		cfg |= GSC_OUT_PATH_MEMORY;
1552		gsc_write(cfg, GSC_OUT_CON);
1553		break;
1554	case IPP_CMD_WB:
1555		set_wb.enable = 1;
1556		set_wb.refresh = property->refresh_rate;
1557		gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
1558		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
1559
1560		/* src local path */
1561		cfg = gsc_read(GSC_IN_CON);
1562		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1563		cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
1564		gsc_write(cfg, GSC_IN_CON);
1565
1566		/* dst dma memory */
1567		cfg = gsc_read(GSC_OUT_CON);
1568		cfg |= GSC_OUT_PATH_MEMORY;
1569		gsc_write(cfg, GSC_OUT_CON);
1570		break;
1571	case IPP_CMD_OUTPUT:
1572		/* src dma memory */
1573		cfg = gsc_read(GSC_IN_CON);
1574		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1575		cfg |= GSC_IN_PATH_MEMORY;
1576		gsc_write(cfg, GSC_IN_CON);
1577
1578		/* dst local path */
1579		cfg = gsc_read(GSC_OUT_CON);
1580		cfg |= GSC_OUT_PATH_MEMORY;
1581		gsc_write(cfg, GSC_OUT_CON);
1582		break;
1583	default:
1584		ret = -EINVAL;
1585		dev_err(dev, "invalid operations.\n");
1586		return ret;
1587	}
1588
1589	ret = gsc_set_prescaler(ctx, &ctx->sc,
1590		&img_pos[EXYNOS_DRM_OPS_SRC],
1591		&img_pos[EXYNOS_DRM_OPS_DST]);
1592	if (ret) {
		dev_err(dev, "failed to set prescaler.\n");
1594		return ret;
1595	}
1596
1597	gsc_set_scaler(ctx, &ctx->sc);
1598
1599	cfg = gsc_read(GSC_ENABLE);
1600	cfg |= GSC_ENABLE_ON;
1601	gsc_write(cfg, GSC_ENABLE);
1602
1603	return 0;
1604}
1605
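/*
 * Stop the current operation: tear down the writeback path if needed,
 * disable the interrupt, mask all output buffer slots and clear
 * GSC_ENABLE_ON.
 */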
1606static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1607{
1608	struct gsc_context *ctx = get_gsc_context(dev);
1609	struct drm_exynos_ipp_set_wb set_wb = {0, 0};
1610	u32 cfg;
1611
1612	DRM_DEBUG_KMS("cmd[%d]\n", cmd);
1613
1614	switch (cmd) {
1615	case IPP_CMD_M2M:
1616		/* bypass */
1617		break;
1618	case IPP_CMD_WB:
1619		gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
1620		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
1621		break;
1622	case IPP_CMD_OUTPUT:
1623	default:
1624		dev_err(dev, "invalid operations.\n");
1625		break;
1626	}
1627
1628	gsc_handle_irq(ctx, false, false, true);
1629
1630	/* reset sequence */
1631	gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK);
1632	gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK);
1633	gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK);
1634
1635	cfg = gsc_read(GSC_ENABLE);
1636	cfg &= ~GSC_ENABLE_ON;
1637	gsc_write(cfg, GSC_ENABLE);
1638}
1639
1640static int gsc_probe(struct platform_device *pdev)
1641{
1642	struct device *dev = &pdev->dev;
1643	struct gsc_context *ctx;
1644	struct resource *res;
1645	struct exynos_drm_ippdrv *ippdrv;
1646	int ret;
1647
1648	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1649	if (!ctx)
1650		return -ENOMEM;
1651
1652	/* clock control */
1653	ctx->gsc_clk = devm_clk_get(dev, "gscl");
1654	if (IS_ERR(ctx->gsc_clk)) {
1655		dev_err(dev, "failed to get gsc clock.\n");
1656		return PTR_ERR(ctx->gsc_clk);
1657	}
1658
1659	/* resource memory */
1660	ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1661	ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
1662	if (IS_ERR(ctx->regs))
1663		return PTR_ERR(ctx->regs);
1664
1665	/* resource irq */
1666	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1667	if (!res) {
1668		dev_err(dev, "failed to request irq resource.\n");
1669		return -ENOENT;
1670	}
1671
1672	ctx->irq = res->start;
1673	ret = devm_request_threaded_irq(dev, ctx->irq, NULL, gsc_irq_handler,
1674		IRQF_ONESHOT, "drm_gsc", ctx);
1675	if (ret < 0) {
1676		dev_err(dev, "failed to request irq.\n");
1677		return ret;
1678	}
1679
	/* context initialization */
1681	ctx->id = pdev->id;
1682
1683	ippdrv = &ctx->ippdrv;
1684	ippdrv->dev = dev;
1685	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
1686	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
1687	ippdrv->check_property = gsc_ippdrv_check_property;
1688	ippdrv->reset = gsc_ippdrv_reset;
1689	ippdrv->start = gsc_ippdrv_start;
1690	ippdrv->stop = gsc_ippdrv_stop;
1691	ret = gsc_init_prop_list(ippdrv);
1692	if (ret < 0) {
1693		dev_err(dev, "failed to init property list.\n");
1694		return ret;
1695	}
1696
	DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
1698
1699	mutex_init(&ctx->lock);
1700	platform_set_drvdata(pdev, ctx);
1701
1702	pm_runtime_set_active(dev);
1703	pm_runtime_enable(dev);
1704
1705	ret = exynos_drm_ippdrv_register(ippdrv);
1706	if (ret < 0) {
1707		dev_err(dev, "failed to register drm gsc device.\n");
1708		goto err_ippdrv_register;
1709	}
1710
1711	dev_info(dev, "drm gsc registered successfully.\n");
1712
1713	return 0;
1714
1715err_ippdrv_register:
1716	pm_runtime_disable(dev);
1717	return ret;
1718}
1719
1720static int gsc_remove(struct platform_device *pdev)
1721{
1722	struct device *dev = &pdev->dev;
1723	struct gsc_context *ctx = get_gsc_context(dev);
1724	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1725
1726	exynos_drm_ippdrv_unregister(ippdrv);
1727	mutex_destroy(&ctx->lock);
1728
1729	pm_runtime_set_suspended(dev);
1730	pm_runtime_disable(dev);
1731
1732	return 0;
1733}
1734
1735#ifdef CONFIG_PM_SLEEP
1736static int gsc_suspend(struct device *dev)
1737{
1738	struct gsc_context *ctx = get_gsc_context(dev);
1739
1740	DRM_DEBUG_KMS("id[%d]\n", ctx->id);
1741
1742	if (pm_runtime_suspended(dev))
1743		return 0;
1744
1745	return gsc_clk_ctrl(ctx, false);
1746}
1747
1748static int gsc_resume(struct device *dev)
1749{
1750	struct gsc_context *ctx = get_gsc_context(dev);
1751
1752	DRM_DEBUG_KMS("id[%d]\n", ctx->id);
1753
1754	if (!pm_runtime_suspended(dev))
1755		return gsc_clk_ctrl(ctx, true);
1756
1757	return 0;
1758}
1759#endif
1760
1761#ifdef CONFIG_PM
1762static int gsc_runtime_suspend(struct device *dev)
1763{
1764	struct gsc_context *ctx = get_gsc_context(dev);
1765
1766	DRM_DEBUG_KMS("id[%d]\n", ctx->id);
1767
	return gsc_clk_ctrl(ctx, false);
1769}
1770
1771static int gsc_runtime_resume(struct device *dev)
1772{
1773	struct gsc_context *ctx = get_gsc_context(dev);
1774
1775	DRM_DEBUG_KMS("id[%d]\n", ctx->id);
1776
	return gsc_clk_ctrl(ctx, true);
1778}
1779#endif
1780
1781static const struct dev_pm_ops gsc_pm_ops = {
1782	SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
1783	SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
1784};
1785
1786struct platform_driver gsc_driver = {
1787	.probe		= gsc_probe,
1788	.remove		= gsc_remove,
1789	.driver		= {
1790		.name	= "exynos-drm-gsc",
1791		.owner	= THIS_MODULE,
1792		.pm	= &gsc_pm_ops,
1793	},
1794};
1795
1796