1/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
2 */
3/*
4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 *
28 */
29
30#ifndef _I915_DRV_H_
31#define _I915_DRV_H_
32
33#include <uapi/drm/i915_drm.h>
34#include <uapi/drm/drm_fourcc.h>
35
36#include "i915_reg.h"
37#include "intel_bios.h"
38#include "intel_ringbuffer.h"
39#include "intel_lrc.h"
40#include "i915_gem_gtt.h"
41#include "i915_gem_render_state.h"
42#include <linux/io-mapping.h>
43#include <linux/i2c.h>
44#include <linux/i2c-algo-bit.h>
45#include <drm/intel-gtt.h>
46#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
47#include <drm/drm_gem.h>
48#include <linux/backlight.h>
49#include <linux/hashtable.h>
50#include <linux/intel-iommu.h>
51#include <linux/kref.h>
52#include <linux/pm_qos.h>
53
54/* General customization:
55 */
56
57#define DRIVER_NAME		"i915"
58#define DRIVER_DESC		"Intel Graphics"
59#define DRIVER_DATE		"20150327"
60
61#undef WARN_ON
/* Many gcc versions seem to not see through this and fall over :( */
63#if 0
64#define WARN_ON(x) ({ \
65	bool __i915_warn_cond = (x); \
66	if (__builtin_constant_p(__i915_warn_cond)) \
67		BUILD_BUG_ON(__i915_warn_cond); \
68	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
69#else
70#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
71#endif
72
73#undef WARN_ON_ONCE
74#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(" #x ")")
75
#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
			     (unsigned long)(x), __func__)
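
/*
 * Illustrative use of MISSING_CASE() (a sketch, not code lifted from the
 * driver): it is intended for the default branch of a switch statement that
 * should have handled every value, e.g.
 *
 *	switch (port) {
 *	case PORT_A:
 *		...
 *		break;
 *	default:
 *		MISSING_CASE(port);
 *		break;
 *	}
 */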
78
79/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
80 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
81 * which may not necessarily be a user visible problem.  This will either
 * WARN() or DRM_ERROR() depending on the i915.verbose_state_checks
 * moduleparam, to enable distros and users to tailor their preferred
 * amount of i915 abrt spam.
85 */
86#define I915_STATE_WARN(condition, format...) ({			\
87	int __ret_warn_on = !!(condition);				\
88	if (unlikely(__ret_warn_on)) {					\
89		if (i915.verbose_state_checks)				\
90			WARN(1, format);				\
91		else 							\
92			DRM_ERROR(format);				\
93	}								\
94	unlikely(__ret_warn_on);					\
95})
96
97#define I915_STATE_WARN_ON(condition) ({				\
98	int __ret_warn_on = !!(condition);				\
99	if (unlikely(__ret_warn_on)) {					\
100		if (i915.verbose_state_checks)				\
101			WARN(1, "WARN_ON(" #condition ")\n");		\
102		else 							\
103			DRM_ERROR("WARN_ON(" #condition ")\n");		\
104	}								\
105	unlikely(__ret_warn_on);					\
106})
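
/*
 * Illustrative example (a sketch, not lifted verbatim from the driver): a hw
 * state check typically compares what the hardware reports against what the
 * software state expects, e.g.
 *
 *	I915_STATE_WARN(cur_state != state,
 *			"pipe %c assertion failure (expected %s, current %s)\n",
 *			pipe_name(pipe), state ? "on" : "off",
 *			cur_state ? "on" : "off");
 */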
107
108enum pipe {
109	INVALID_PIPE = -1,
110	PIPE_A = 0,
111	PIPE_B,
112	PIPE_C,
113	_PIPE_EDP,
114	I915_MAX_PIPES = _PIPE_EDP
115};
116#define pipe_name(p) ((p) + 'A')
117
118enum transcoder {
119	TRANSCODER_A = 0,
120	TRANSCODER_B,
121	TRANSCODER_C,
122	TRANSCODER_EDP,
123	I915_MAX_TRANSCODERS
124};
125#define transcoder_name(t) ((t) + 'A')
126
127/*
128 * This is the maximum (across all platforms) number of planes (primary +
129 * sprites) that can be active at the same time on one pipe.
130 *
131 * This value doesn't count the cursor plane.
132 */
133#define I915_MAX_PLANES	3
134
135enum plane {
136	PLANE_A = 0,
137	PLANE_B,
138	PLANE_C,
139};
140#define plane_name(p) ((p) + 'A')
141
142#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
143
144enum port {
145	PORT_A = 0,
146	PORT_B,
147	PORT_C,
148	PORT_D,
149	PORT_E,
150	I915_MAX_PORTS
151};
152#define port_name(p) ((p) + 'A')
153
154#define I915_NUM_PHYS_VLV 2
155
156enum dpio_channel {
157	DPIO_CH0,
158	DPIO_CH1
159};
160
161enum dpio_phy {
162	DPIO_PHY0,
163	DPIO_PHY1
164};
165
166enum intel_display_power_domain {
167	POWER_DOMAIN_PIPE_A,
168	POWER_DOMAIN_PIPE_B,
169	POWER_DOMAIN_PIPE_C,
170	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
171	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
172	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
173	POWER_DOMAIN_TRANSCODER_A,
174	POWER_DOMAIN_TRANSCODER_B,
175	POWER_DOMAIN_TRANSCODER_C,
176	POWER_DOMAIN_TRANSCODER_EDP,
177	POWER_DOMAIN_PORT_DDI_A_2_LANES,
178	POWER_DOMAIN_PORT_DDI_A_4_LANES,
179	POWER_DOMAIN_PORT_DDI_B_2_LANES,
180	POWER_DOMAIN_PORT_DDI_B_4_LANES,
181	POWER_DOMAIN_PORT_DDI_C_2_LANES,
182	POWER_DOMAIN_PORT_DDI_C_4_LANES,
183	POWER_DOMAIN_PORT_DDI_D_2_LANES,
184	POWER_DOMAIN_PORT_DDI_D_4_LANES,
185	POWER_DOMAIN_PORT_DSI,
186	POWER_DOMAIN_PORT_CRT,
187	POWER_DOMAIN_PORT_OTHER,
188	POWER_DOMAIN_VGA,
189	POWER_DOMAIN_AUDIO,
190	POWER_DOMAIN_PLLS,
191	POWER_DOMAIN_AUX_A,
192	POWER_DOMAIN_AUX_B,
193	POWER_DOMAIN_AUX_C,
194	POWER_DOMAIN_AUX_D,
195	POWER_DOMAIN_INIT,
196
197	POWER_DOMAIN_NUM,
198};
199
200#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
201#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
202		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
203#define POWER_DOMAIN_TRANSCODER(tran) \
204	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
205	 (tran) + POWER_DOMAIN_TRANSCODER_A)
206
207enum hpd_pin {
208	HPD_NONE = 0,
209	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
210	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
211	HPD_CRT,
212	HPD_SDVO_B,
213	HPD_SDVO_C,
214	HPD_PORT_B,
215	HPD_PORT_C,
216	HPD_PORT_D,
217	HPD_NUM_PINS
218};
219
220#define I915_GEM_GPU_DOMAINS \
221	(I915_GEM_DOMAIN_RENDER | \
222	 I915_GEM_DOMAIN_SAMPLER | \
223	 I915_GEM_DOMAIN_COMMAND | \
224	 I915_GEM_DOMAIN_INSTRUCTION | \
225	 I915_GEM_DOMAIN_VERTEX)
226
227#define for_each_pipe(__dev_priv, __p) \
228	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
229#define for_each_plane(__dev_priv, __pipe, __p)				\
230	for ((__p) = 0;							\
231	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1;	\
232	     (__p)++)
233#define for_each_sprite(__dev_priv, __p, __s)				\
234	for ((__s) = 0;							\
235	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)];	\
236	     (__s)++)
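
/*
 * Illustrative use of the iterators above (a sketch; assumes a valid
 * dev_priv pointer):
 *
 *	enum pipe pipe;
 *
 *	for_each_pipe(dev_priv, pipe)
 *		DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
 */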
237
238#define for_each_crtc(dev, crtc) \
239	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
240
241#define for_each_intel_crtc(dev, intel_crtc) \
242	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
243
244#define for_each_intel_encoder(dev, intel_encoder)		\
245	list_for_each_entry(intel_encoder,			\
246			    &(dev)->mode_config.encoder_list,	\
247			    base.head)
248
249#define for_each_intel_connector(dev, intel_connector)		\
250	list_for_each_entry(intel_connector,			\
251			    &dev->mode_config.connector_list,	\
252			    base.head)
253
254
255#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
256	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
257		if ((intel_encoder)->base.crtc == (__crtc))
258
259#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
260	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
261		if ((intel_connector)->base.encoder == (__encoder))
262
263#define for_each_power_domain(domain, mask)				\
264	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
265		if ((1 << (domain)) & (mask))
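
/*
 * Illustrative use of for_each_power_domain() (a sketch; "mask" and
 * "domain_use_count" stand in for the caller's own data): walk every power
 * domain selected by a bitmask, e.g.
 *
 *	enum intel_display_power_domain domain;
 *
 *	for_each_power_domain(domain, mask)
 *		domain_use_count[domain]++;
 */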
266
267struct drm_i915_private;
268struct i915_mm_struct;
269struct i915_mmu_object;
270
271enum intel_dpll_id {
272	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
273	/* real shared dpll ids must be >= 0 */
274	DPLL_ID_PCH_PLL_A = 0,
275	DPLL_ID_PCH_PLL_B = 1,
276	/* hsw/bdw */
277	DPLL_ID_WRPLL1 = 0,
278	DPLL_ID_WRPLL2 = 1,
279	/* skl */
280	DPLL_ID_SKL_DPLL1 = 0,
281	DPLL_ID_SKL_DPLL2 = 1,
282	DPLL_ID_SKL_DPLL3 = 2,
283};
284#define I915_NUM_PLLS 3
285
286struct intel_dpll_hw_state {
287	/* i9xx, pch plls */
288	uint32_t dpll;
289	uint32_t dpll_md;
290	uint32_t fp0;
291	uint32_t fp1;
292
293	/* hsw, bdw */
294	uint32_t wrpll;
295
296	/* skl */
297	/*
	 * DPLL_CTRL1 has 6 bits for each DPLL. We store those in the
	 * lower part of ctrl1 and they get shifted into position when writing
	 * the register.  This allows us to easily compare the state when
	 * deciding whether the DPLL can be shared.
302	 */
303	uint32_t ctrl1;
304	/* HDMI only, 0 when used for DP */
305	uint32_t cfgcr1, cfgcr2;
306};
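
/*
 * Rough sketch of how ctrl1 is consumed on skl (see the comment above): when
 * programming DPLL_CTRL1 the per-DPLL bits stored in the low 6 bits of ctrl1
 * end up shifted into place, approximately
 *
 *	val |= hw_state->ctrl1 << (6 * dpll);
 *
 * where dpll is the index of the DPLL being programmed within DPLL_CTRL1.
 */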
307
308struct intel_shared_dpll_config {
309	unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
310	struct intel_dpll_hw_state hw_state;
311};
312
313struct intel_shared_dpll {
314	struct intel_shared_dpll_config config;
315	struct intel_shared_dpll_config *new_config;
316
317	int active; /* count of number of active CRTCs (i.e. DPMS on) */
318	bool on; /* is the PLL actually active? Disabled during modeset */
319	const char *name;
320	/* should match the index in the dev_priv->shared_dplls array */
321	enum intel_dpll_id id;
322	/* The mode_set hook is optional and should be used together with the
323	 * intel_prepare_shared_dpll function. */
324	void (*mode_set)(struct drm_i915_private *dev_priv,
325			 struct intel_shared_dpll *pll);
326	void (*enable)(struct drm_i915_private *dev_priv,
327		       struct intel_shared_dpll *pll);
328	void (*disable)(struct drm_i915_private *dev_priv,
329			struct intel_shared_dpll *pll);
330	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
331			     struct intel_shared_dpll *pll,
332			     struct intel_dpll_hw_state *hw_state);
333};
334
335#define SKL_DPLL0 0
336#define SKL_DPLL1 1
337#define SKL_DPLL2 2
338#define SKL_DPLL3 3
339
340/* Used by dp and fdi links */
341struct intel_link_m_n {
342	uint32_t	tu;
343	uint32_t	gmch_m;
344	uint32_t	gmch_n;
345	uint32_t	link_m;
346	uint32_t	link_n;
347};
348
349void intel_link_compute_m_n(int bpp, int nlanes,
350			    int pixel_clock, int link_clock,
351			    struct intel_link_m_n *m_n);
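
/*
 * Conceptually (a sketch of what intel_link_compute_m_n() produces), the
 * ratios are roughly
 *
 *	gmch_m / gmch_n = (bpp * pixel_clock) / (nlanes * 8 * link_clock)
 *	link_m / link_n = pixel_clock / link_clock
 *
 * with both fractions reduced so that m and n fit the hardware M/N
 * registers, and tu holding the transfer unit size.
 */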
352
353/* Interface history:
354 *
355 * 1.1: Original.
356 * 1.2: Add Power Management
357 * 1.3: Add vblank support
358 * 1.4: Fix cmdbuffer path, add heap destroy
359 * 1.5: Add vblank pipe configuration
360 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
361 *      - Support vertical blank on secondary display pipe
362 */
363#define DRIVER_MAJOR		1
364#define DRIVER_MINOR		6
365#define DRIVER_PATCHLEVEL	0
366
367#define WATCH_LISTS	0
368
369struct opregion_header;
370struct opregion_acpi;
371struct opregion_swsci;
372struct opregion_asle;
373
374struct intel_opregion {
375	struct opregion_header __iomem *header;
376	struct opregion_acpi __iomem *acpi;
377	struct opregion_swsci __iomem *swsci;
378	u32 swsci_gbda_sub_functions;
379	u32 swsci_sbcb_sub_functions;
380	struct opregion_asle __iomem *asle;
381	void __iomem *vbt;
382	u32 __iomem *lid_state;
383	struct work_struct asle_work;
384};
385#define OPREGION_SIZE            (8*1024)
386
387struct intel_overlay;
388struct intel_overlay_error_state;
389
390#define I915_FENCE_REG_NONE -1
391#define I915_MAX_NUM_FENCES 32
392/* 32 fences + sign bit for FENCE_REG_NONE */
393#define I915_MAX_NUM_FENCE_BITS 6
394
395struct drm_i915_fence_reg {
396	struct list_head lru_list;
397	struct drm_i915_gem_object *obj;
398	int pin_count;
399};
400
401struct sdvo_device_mapping {
402	u8 initialized;
403	u8 dvo_port;
404	u8 slave_addr;
405	u8 dvo_wiring;
406	u8 i2c_pin;
407	u8 ddc_pin;
408};
409
410struct intel_display_error_state;
411
412struct drm_i915_error_state {
413	struct kref ref;
414	struct timeval time;
415
416	char error_msg[128];
417	u32 reset_count;
418	u32 suspend_count;
419
420	/* Generic register state */
421	u32 eir;
422	u32 pgtbl_er;
423	u32 ier;
424	u32 gtier[4];
425	u32 ccid;
426	u32 derrmr;
427	u32 forcewake;
428	u32 error; /* gen6+ */
429	u32 err_int; /* gen7 */
430	u32 fault_data0; /* gen8, gen9 */
431	u32 fault_data1; /* gen8, gen9 */
432	u32 done_reg;
433	u32 gac_eco;
434	u32 gam_ecochk;
435	u32 gab_ctl;
436	u32 gfx_mode;
437	u32 extra_instdone[I915_NUM_INSTDONE_REG];
438	u64 fence[I915_MAX_NUM_FENCES];
439	struct intel_overlay_error_state *overlay;
440	struct intel_display_error_state *display;
441	struct drm_i915_error_object *semaphore_obj;
442
443	struct drm_i915_error_ring {
444		bool valid;
445		/* Software tracked state */
446		bool waiting;
447		int hangcheck_score;
448		enum intel_ring_hangcheck_action hangcheck_action;
449		int num_requests;
450
451		/* our own tracking of ring head and tail */
452		u32 cpu_ring_head;
453		u32 cpu_ring_tail;
454
455		u32 semaphore_seqno[I915_NUM_RINGS - 1];
456
457		/* Register state */
458		u32 tail;
459		u32 head;
460		u32 ctl;
461		u32 hws;
462		u32 ipeir;
463		u32 ipehr;
464		u32 instdone;
465		u32 bbstate;
466		u32 instpm;
467		u32 instps;
468		u32 seqno;
469		u64 bbaddr;
470		u64 acthd;
471		u32 fault_reg;
472		u64 faddr;
473		u32 rc_psmi; /* sleep state */
474		u32 semaphore_mboxes[I915_NUM_RINGS - 1];
475
476		struct drm_i915_error_object {
477			int page_count;
478			u32 gtt_offset;
479			u32 *pages[0];
480		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
481
482		struct drm_i915_error_request {
483			long jiffies;
484			u32 seqno;
485			u32 tail;
486		} *requests;
487
488		struct {
489			u32 gfx_mode;
490			union {
491				u64 pdp[4];
492				u32 pp_dir_base;
493			};
494		} vm_info;
495
496		pid_t pid;
497		char comm[TASK_COMM_LEN];
498	} ring[I915_NUM_RINGS];
499
500	struct drm_i915_error_buffer {
501		u32 size;
502		u32 name;
503		u32 rseqno, wseqno;
504		u32 gtt_offset;
505		u32 read_domains;
506		u32 write_domain;
507		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
508		s32 pinned:2;
509		u32 tiling:2;
510		u32 dirty:1;
511		u32 purgeable:1;
512		u32 userptr:1;
513		s32 ring:4;
514		u32 cache_level:3;
515	} **active_bo, **pinned_bo;
516
517	u32 *active_bo_count, *pinned_bo_count;
518	u32 vm_count;
519};
520
521struct intel_connector;
522struct intel_encoder;
523struct intel_crtc_state;
524struct intel_initial_plane_config;
525struct intel_crtc;
526struct intel_limit;
527struct dpll;
528
529struct drm_i915_display_funcs {
530	bool (*fbc_enabled)(struct drm_device *dev);
531	void (*enable_fbc)(struct drm_crtc *crtc);
532	void (*disable_fbc)(struct drm_device *dev);
533	int (*get_display_clock_speed)(struct drm_device *dev);
534	int (*get_fifo_size)(struct drm_device *dev, int plane);
535	/**
536	 * find_dpll() - Find the best values for the PLL
537	 * @limit: limits for the PLL
	 * @crtc_state: current CRTC state
539	 * @target: target frequency in kHz
540	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, the P divider of @best_clock must
	 *               match the P divider of @match_clock; used for
	 *               LVDS downclocking
544	 * @best_clock: best PLL values found
545	 *
546	 * Returns true on success, false on failure.
547	 */
548	bool (*find_dpll)(const struct intel_limit *limit,
549			  struct intel_crtc_state *crtc_state,
550			  int target, int refclk,
551			  struct dpll *match_clock,
552			  struct dpll *best_clock);
553	void (*update_wm)(struct drm_crtc *crtc);
554	void (*update_sprite_wm)(struct drm_plane *plane,
555				 struct drm_crtc *crtc,
556				 uint32_t sprite_width, uint32_t sprite_height,
557				 int pixel_size, bool enable, bool scaled);
558	void (*modeset_global_resources)(struct drm_atomic_state *state);
559	/* Returns the active state of the crtc, and if the crtc is active,
560	 * fills out the pipe-config with the hw state. */
561	bool (*get_pipe_config)(struct intel_crtc *,
562				struct intel_crtc_state *);
563	void (*get_initial_plane_config)(struct intel_crtc *,
564					 struct intel_initial_plane_config *);
565	int (*crtc_compute_clock)(struct intel_crtc *crtc,
566				  struct intel_crtc_state *crtc_state);
567	void (*crtc_enable)(struct drm_crtc *crtc);
568	void (*crtc_disable)(struct drm_crtc *crtc);
569	void (*off)(struct drm_crtc *crtc);
570	void (*audio_codec_enable)(struct drm_connector *connector,
571				   struct intel_encoder *encoder,
572				   struct drm_display_mode *mode);
573	void (*audio_codec_disable)(struct intel_encoder *encoder);
574	void (*fdi_link_train)(struct drm_crtc *crtc);
575	void (*init_clock_gating)(struct drm_device *dev);
576	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
577			  struct drm_framebuffer *fb,
578			  struct drm_i915_gem_object *obj,
579			  struct intel_engine_cs *ring,
580			  uint32_t flags);
581	void (*update_primary_plane)(struct drm_crtc *crtc,
582				     struct drm_framebuffer *fb,
583				     int x, int y);
584	void (*hpd_irq_setup)(struct drm_device *dev);
585	/* clock updates for mode set */
586	/* cursor updates */
587	/* render clock increase/decrease */
588	/* display clock increase/decrease */
589	/* pll clock increase/decrease */
590
591	int (*setup_backlight)(struct intel_connector *connector, enum pipe pipe);
592	uint32_t (*get_backlight)(struct intel_connector *connector);
593	void (*set_backlight)(struct intel_connector *connector,
594			      uint32_t level);
595	void (*disable_backlight)(struct intel_connector *connector);
596	void (*enable_backlight)(struct intel_connector *connector);
597};
598
599enum forcewake_domain_id {
600	FW_DOMAIN_ID_RENDER = 0,
601	FW_DOMAIN_ID_BLITTER,
602	FW_DOMAIN_ID_MEDIA,
603
604	FW_DOMAIN_ID_COUNT
605};
606
607enum forcewake_domains {
608	FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
609	FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
610	FORCEWAKE_MEDIA	= (1 << FW_DOMAIN_ID_MEDIA),
611	FORCEWAKE_ALL = (FORCEWAKE_RENDER |
612			 FORCEWAKE_BLITTER |
613			 FORCEWAKE_MEDIA)
614};
615
616struct intel_uncore_funcs {
617	void (*force_wake_get)(struct drm_i915_private *dev_priv,
618							enum forcewake_domains domains);
619	void (*force_wake_put)(struct drm_i915_private *dev_priv,
620							enum forcewake_domains domains);
621
622	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
623	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
624	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
625	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
626
627	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
628				uint8_t val, bool trace);
629	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
630				uint16_t val, bool trace);
631	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
632				uint32_t val, bool trace);
633	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
634				uint64_t val, bool trace);
635};
636
637struct intel_uncore {
638	spinlock_t lock; /** lock is also taken in irq contexts. */
639
640	struct intel_uncore_funcs funcs;
641
642	unsigned fifo_count;
643	enum forcewake_domains fw_domains;
644
645	struct intel_uncore_forcewake_domain {
646		struct drm_i915_private *i915;
647		enum forcewake_domain_id id;
648		unsigned wake_count;
649		struct timer_list timer;
650		u32 reg_set;
651		u32 val_set;
652		u32 val_clear;
653		u32 reg_ack;
654		u32 reg_post;
655		u32 val_reset;
656	} fw_domain[FW_DOMAIN_ID_COUNT];
657};
658
659/* Iterate over initialised fw domains */
660#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
661	for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
662	     (i__) < FW_DOMAIN_ID_COUNT; \
663	     (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
664		if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))
665
666#define for_each_fw_domain(domain__, dev_priv__, i__) \
667	for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
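
/*
 * Illustrative use (a sketch): callers typically walk the initialised
 * forcewake domains like
 *
 *	struct intel_uncore_forcewake_domain *domain;
 *	int id;
 *
 *	for_each_fw_domain(domain, dev_priv, id)
 *		DRM_DEBUG("fw domain %d wake count %u\n", id, domain->wake_count);
 */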
668
669#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
670	func(is_mobile) sep \
671	func(is_i85x) sep \
672	func(is_i915g) sep \
673	func(is_i945gm) sep \
674	func(is_g33) sep \
675	func(need_gfx_hws) sep \
676	func(is_g4x) sep \
677	func(is_pineview) sep \
678	func(is_broadwater) sep \
679	func(is_crestline) sep \
680	func(is_ivybridge) sep \
681	func(is_valleyview) sep \
682	func(is_haswell) sep \
683	func(is_skylake) sep \
684	func(is_preliminary) sep \
685	func(has_fbc) sep \
686	func(has_pipe_cxsr) sep \
687	func(has_hotplug) sep \
688	func(cursor_needs_physical) sep \
689	func(has_overlay) sep \
690	func(overlay_needs_physical) sep \
691	func(supports_tv) sep \
692	func(has_llc) sep \
693	func(has_ddi) sep \
694	func(has_fpga_dbg)
695
696#define DEFINE_FLAG(name) u8 name:1
697#define SEP_SEMICOLON ;
698
699struct intel_device_info {
700	u32 display_mmio_offset;
701	u16 device_id;
702	u8 num_pipes:3;
703	u8 num_sprites[I915_MAX_PIPES];
704	u8 gen;
705	u8 ring_mask; /* Rings supported by the HW */
706	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
707	/* Register offsets for the various display pipes and transcoders */
708	int pipe_offsets[I915_MAX_TRANSCODERS];
709	int trans_offsets[I915_MAX_TRANSCODERS];
710	int palette_offsets[I915_MAX_PIPES];
711	int cursor_offsets[I915_MAX_PIPES];
712
713	/* Slice/subslice/EU info */
714	u8 slice_total;
715	u8 subslice_total;
716	u8 subslice_per_slice;
717	u8 eu_total;
718	u8 eu_per_subslice;
719	/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
720	u8 subslice_7eu[3];
721	u8 has_slice_pg:1;
722	u8 has_subslice_pg:1;
723	u8 has_eu_pg:1;
724};
725
726#undef DEFINE_FLAG
727#undef SEP_SEMICOLON
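
/*
 * The DEV_INFO_FOR_EACH_FLAG() x-macro above, expanded with DEFINE_FLAG and
 * SEP_SEMICOLON inside struct intel_device_info, simply produces a series of
 * single-bit fields, i.e. roughly:
 *
 *	u8 is_mobile:1;
 *	u8 is_i85x:1;
 *	...
 *	u8 has_fpga_dbg:1;
 */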
728
729enum i915_cache_level {
730	I915_CACHE_NONE = 0,
731	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, e.g. sampler/render caches, and the
734			      large Last-Level-Cache. LLC is coherent with
735			      the CPU, but L3 is only visible to the GPU. */
736	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
737};
738
739struct i915_ctx_hang_stats {
740	/* This context had batch pending when hang was declared */
741	unsigned batch_pending;
742
743	/* This context had batch active when hang was declared */
744	unsigned batch_active;
745
746	/* Time when this context was last blamed for a GPU reset */
747	unsigned long guilty_ts;
748
	/* If the context causes a second GPU hang within this time,
750	 * it is permanently banned from submitting any more work.
751	 */
752	unsigned long ban_period_seconds;
753
	/* This context is banned from submitting more work */
755	bool banned;
756};
757
758/* This must match up with the value previously used for execbuf2.rsvd1. */
759#define DEFAULT_CONTEXT_HANDLE 0
760/**
761 * struct intel_context - as the name implies, represents a context.
762 * @ref: reference count.
763 * @user_handle: userspace tracking identity for this context.
764 * @remap_slice: l3 row remapping information.
765 * @file_priv: filp associated with this context (NULL for global default
766 *	       context).
767 * @hang_stats: information about the role of this context in possible GPU
768 *		hangs.
769 * @vm: virtual memory space used by this context.
770 * @legacy_hw_ctx: render context backing object and whether it is correctly
771 *                initialized (legacy ring submission mechanism only).
772 * @link: link in the global list of contexts.
773 *
774 * Contexts are memory images used by the hardware to store copies of their
775 * internal state.
776 */
777struct intel_context {
778	struct kref ref;
779	int user_handle;
780	uint8_t remap_slice;
781	struct drm_i915_file_private *file_priv;
782	struct i915_ctx_hang_stats hang_stats;
783	struct i915_hw_ppgtt *ppgtt;
784
785	/* Legacy ring buffer submission */
786	struct {
787		struct drm_i915_gem_object *rcs_state;
788		bool initialized;
789	} legacy_hw_ctx;
790
791	/* Execlists */
792	bool rcs_initialized;
793	struct {
794		struct drm_i915_gem_object *state;
795		struct intel_ringbuffer *ringbuf;
796		int pin_count;
797	} engine[I915_NUM_RINGS];
798
799	struct list_head link;
800};
801
802enum fb_op_origin {
803	ORIGIN_GTT,
804	ORIGIN_CPU,
805	ORIGIN_CS,
806	ORIGIN_FLIP,
807};
808
809struct i915_fbc {
810	unsigned long uncompressed_size;
811	unsigned threshold;
812	unsigned int fb_id;
813	unsigned int possible_framebuffer_bits;
814	unsigned int busy_bits;
815	struct intel_crtc *crtc;
816	int y;
817
818	struct drm_mm_node compressed_fb;
819	struct drm_mm_node *compressed_llb;
820
821	bool false_color;
822
823	/* Tracks whether the HW is actually enabled, not whether the feature is
824	 * possible. */
825	bool enabled;
826
827	struct intel_fbc_work {
828		struct delayed_work work;
829		struct drm_crtc *crtc;
830		struct drm_framebuffer *fb;
831	} *fbc_work;
832
833	enum no_fbc_reason {
834		FBC_OK, /* FBC is enabled */
835		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
836		FBC_NO_OUTPUT, /* no outputs enabled to compress */
837		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
		FBC_UNSUPPORTED_MODE, /* interlaced or doublescan mode */
839		FBC_MODE_TOO_LARGE, /* mode too large for compression */
840		FBC_BAD_PLANE, /* fbc not supported on plane */
841		FBC_NOT_TILED, /* buffer not tiled */
842		FBC_MULTIPLE_PIPES, /* more than one pipe active */
843		FBC_MODULE_PARAM,
844		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
845	} no_fbc_reason;
846};
847
848/**
 * HIGH_RR is the highest eDP panel refresh rate read from the EDID.
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for the same resolution.
852 */
853enum drrs_refresh_rate_type {
854	DRRS_HIGH_RR,
855	DRRS_LOW_RR,
856	DRRS_MAX_RR, /* RR count */
857};
858
859enum drrs_support_type {
860	DRRS_NOT_SUPPORTED = 0,
861	STATIC_DRRS_SUPPORT = 1,
862	SEAMLESS_DRRS_SUPPORT = 2
863};
864
865struct intel_dp;
866struct i915_drrs {
867	struct mutex mutex;
868	struct delayed_work work;
869	struct intel_dp *dp;
870	unsigned busy_frontbuffer_bits;
871	enum drrs_refresh_rate_type refresh_rate_type;
872	enum drrs_support_type type;
873};
874
875struct i915_psr {
876	struct mutex lock;
877	bool sink_support;
878	bool source_ok;
879	struct intel_dp *enabled;
880	bool active;
881	struct delayed_work work;
882	unsigned busy_frontbuffer_bits;
883	bool link_standby;
884};
885
886enum intel_pch {
887	PCH_NONE = 0,	/* No PCH present */
888	PCH_IBX,	/* Ibexpeak PCH */
889	PCH_CPT,	/* Cougarpoint PCH */
890	PCH_LPT,	/* Lynxpoint PCH */
891	PCH_SPT,        /* Sunrisepoint PCH */
892	PCH_NOP,
893};
894
895enum intel_sbi_destination {
896	SBI_ICLK,
897	SBI_MPHY,
898};
899
900#define QUIRK_PIPEA_FORCE (1<<0)
901#define QUIRK_LVDS_SSC_DISABLE (1<<1)
902#define QUIRK_INVERT_BRIGHTNESS (1<<2)
903#define QUIRK_BACKLIGHT_PRESENT (1<<3)
904#define QUIRK_PIPEB_FORCE (1<<4)
905#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
906
907struct intel_fbdev;
908struct intel_fbc_work;
909
910struct intel_gmbus {
911	struct i2c_adapter adapter;
912	u32 force_bit;
913	u32 reg0;
914	u32 gpio_reg;
915	struct i2c_algo_bit_data bit_algo;
916	struct drm_i915_private *dev_priv;
917};
918
919struct i915_suspend_saved_registers {
920	u32 saveDSPARB;
921	u32 saveLVDS;
922	u32 savePP_ON_DELAYS;
923	u32 savePP_OFF_DELAYS;
924	u32 savePP_ON;
925	u32 savePP_OFF;
926	u32 savePP_CONTROL;
927	u32 savePP_DIVISOR;
928	u32 saveFBC_CONTROL;
929	u32 saveCACHE_MODE_0;
930	u32 saveMI_ARB_STATE;
931	u32 saveSWF0[16];
932	u32 saveSWF1[16];
933	u32 saveSWF2[3];
934	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
935	u32 savePCH_PORT_HOTPLUG;
936	u16 saveGCDGMBUS;
937};
938
939struct vlv_s0ix_state {
940	/* GAM */
941	u32 wr_watermark;
942	u32 gfx_prio_ctrl;
943	u32 arb_mode;
944	u32 gfx_pend_tlb0;
945	u32 gfx_pend_tlb1;
946	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
947	u32 media_max_req_count;
948	u32 gfx_max_req_count;
949	u32 render_hwsp;
950	u32 ecochk;
951	u32 bsd_hwsp;
952	u32 blt_hwsp;
953	u32 tlb_rd_addr;
954
955	/* MBC */
956	u32 g3dctl;
957	u32 gsckgctl;
958	u32 mbctl;
959
960	/* GCP */
961	u32 ucgctl1;
962	u32 ucgctl3;
963	u32 rcgctl1;
964	u32 rcgctl2;
965	u32 rstctl;
966	u32 misccpctl;
967
968	/* GPM */
969	u32 gfxpause;
970	u32 rpdeuhwtc;
971	u32 rpdeuc;
972	u32 ecobus;
973	u32 pwrdwnupctl;
974	u32 rp_down_timeout;
975	u32 rp_deucsw;
976	u32 rcubmabdtmr;
977	u32 rcedata;
978	u32 spare2gh;
979
980	/* Display 1 CZ domain */
981	u32 gt_imr;
982	u32 gt_ier;
983	u32 pm_imr;
984	u32 pm_ier;
985	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
986
987	/* GT SA CZ domain */
988	u32 tilectl;
989	u32 gt_fifoctl;
990	u32 gtlc_wake_ctrl;
991	u32 gtlc_survive;
992	u32 pmwgicz;
993
994	/* Display 2 CZ domain */
995	u32 gu_ctl0;
996	u32 gu_ctl1;
997	u32 pcbr;
998	u32 clock_gate_dis2;
999};
1000
1001struct intel_rps_ei {
1002	u32 cz_clock;
1003	u32 render_c0;
1004	u32 media_c0;
1005};
1006
1007struct intel_gen6_power_mgmt {
1008	/*
1009	 * work, interrupts_enabled and pm_iir are protected by
1010	 * dev_priv->irq_lock
1011	 */
1012	struct work_struct work;
1013	bool interrupts_enabled;
1014	u32 pm_iir;
1015
1016	/* Frequencies are stored in potentially platform dependent multiples.
1017	 * In other words, *_freq needs to be multiplied by X to be interesting.
1018	 * Soft limits are those which are used for the dynamic reclocking done
1019	 * by the driver (raise frequencies under heavy loads, and lower for
1020	 * lighter loads). Hard limits are those imposed by the hardware.
1021	 *
1022	 * A distinction is made for overclocking, which is never enabled by
1023	 * default, and is considered to be above the hard limit if it's
1024	 * possible at all.
1025	 */
1026	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
1027	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
1028	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
1029	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
1030	u8 min_freq;		/* AKA RPn. Minimum frequency */
1031	u8 idle_freq;		/* Frequency to request when we are idle */
1032	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
1034	u8 rp0_freq;		/* Non-overclocked max frequency. */
1035	u32 cz_freq;
1036
1037	int last_adj;
1038	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
1039
1040	bool enabled;
1041	struct delayed_work delayed_resume_work;
1042
1043	/* manual wa residency calculations */
1044	struct intel_rps_ei up_ei, down_ei;
1045
1046	/*
1047	 * Protects RPS/RC6 register access and PCU communication.
1048	 * Must be taken after struct_mutex if nested.
1049	 */
1050	struct mutex hw_lock;
1051};
1052
/* defined in intel_pm.c */
1054extern spinlock_t mchdev_lock;
1055
1056struct intel_ilk_power_mgmt {
1057	u8 cur_delay;
1058	u8 min_delay;
1059	u8 max_delay;
1060	u8 fmax;
1061	u8 fstart;
1062
1063	u64 last_count1;
1064	unsigned long last_time1;
1065	unsigned long chipset_power;
1066	u64 last_count2;
1067	u64 last_time2;
1068	unsigned long gfx_power;
1069	u8 corr;
1070
1071	int c_m;
1072	int r_t;
1073};
1074
1075struct drm_i915_private;
1076struct i915_power_well;
1077
1078struct i915_power_well_ops {
1079	/*
1080	 * Synchronize the well's hw state to match the current sw state, for
1081	 * example enable/disable it based on the current refcount. Called
1082	 * during driver init and resume time, possibly after first calling
1083	 * the enable/disable handlers.
1084	 */
1085	void (*sync_hw)(struct drm_i915_private *dev_priv,
1086			struct i915_power_well *power_well);
1087	/*
1088	 * Enable the well and resources that depend on it (for example
1089	 * interrupts located on the well). Called after the 0->1 refcount
1090	 * transition.
1091	 */
1092	void (*enable)(struct drm_i915_private *dev_priv,
1093		       struct i915_power_well *power_well);
1094	/*
1095	 * Disable the well and resources that depend on it. Called after
1096	 * the 1->0 refcount transition.
1097	 */
1098	void (*disable)(struct drm_i915_private *dev_priv,
1099			struct i915_power_well *power_well);
1100	/* Returns the hw enabled state. */
1101	bool (*is_enabled)(struct drm_i915_private *dev_priv,
1102			   struct i915_power_well *power_well);
1103};
1104
1105/* Power well structure for haswell */
1106struct i915_power_well {
1107	const char *name;
1108	bool always_on;
1109	/* power well enable/disable usage count */
1110	int count;
1111	/* cached hw enabled state */
1112	bool hw_enabled;
1113	unsigned long domains;
1114	unsigned long data;
1115	const struct i915_power_well_ops *ops;
1116};
1117
1118struct i915_power_domains {
1119	/*
1120	 * Power wells needed for initialization at driver init and suspend
1121	 * time are on. They are kept on until after the first modeset.
1122	 */
1123	bool init_power_on;
1124	bool initializing;
1125	int power_well_count;
1126
1127	struct mutex lock;
1128	int domain_use_count[POWER_DOMAIN_NUM];
1129	struct i915_power_well *power_wells;
1130};
1131
1132#define MAX_L3_SLICES 2
1133struct intel_l3_parity {
1134	u32 *remap_info[MAX_L3_SLICES];
1135	struct work_struct error_work;
1136	int which_slice;
1137};
1138
1139struct i915_gem_batch_pool {
1140	struct drm_device *dev;
1141	struct list_head cache_list;
1142};
1143
1144struct i915_gem_mm {
1145	/** Memory allocator for GTT stolen memory */
1146	struct drm_mm stolen;
1147	/** List of all objects in gtt_space. Used to restore gtt
1148	 * mappings on resume */
1149	struct list_head bound_list;
1150	/**
1151	 * List of objects which are not bound to the GTT (thus
1152	 * are idle and not used by the GPU) but still have
1153	 * (presumably uncached) pages still attached.
1154	 */
1155	struct list_head unbound_list;
1156
1157	/*
1158	 * A pool of objects to use as shadow copies of client batch buffers
1159	 * when the command parser is enabled. Prevents the client from
1160	 * modifying the batch contents after software parsing.
1161	 */
1162	struct i915_gem_batch_pool batch_pool;
1163
1164	/** Usable portion of the GTT for GEM */
1165	unsigned long stolen_base; /* limited to low memory (32-bit) */
1166
1167	/** PPGTT used for aliasing the PPGTT with the GTT */
1168	struct i915_hw_ppgtt *aliasing_ppgtt;
1169
1170	struct notifier_block oom_notifier;
1171	struct shrinker shrinker;
1172	bool shrinker_no_lock_stealing;
1173
1174	/** LRU list of objects with fence regs on them. */
1175	struct list_head fence_list;
1176
1177	/**
1178	 * We leave the user IRQ off as much as possible,
1179	 * but this means that requests will finish and never
1180	 * be retired once the system goes idle. Set a timer to
1181	 * fire periodically while the ring is running. When it
1182	 * fires, go retire requests.
1183	 */
1184	struct delayed_work retire_work;
1185
1186	/**
1187	 * When we detect an idle GPU, we want to turn on
1188	 * powersaving features. So once we see that there
1189	 * are no more requests outstanding and no more
1190	 * arrive within a small period of time, we fire
1191	 * off the idle_work.
1192	 */
1193	struct delayed_work idle_work;
1194
1195	/**
1196	 * Are we in a non-interruptible section of code like
1197	 * modesetting?
1198	 */
1199	bool interruptible;
1200
1201	/**
1202	 * Is the GPU currently considered idle, or busy executing userspace
1203	 * requests?  Whilst idle, we attempt to power down the hardware and
1204	 * display clocks. In order to reduce the effect on performance, there
1205	 * is a slight delay before we do so.
1206	 */
1207	bool busy;
1208
1209	/* the indicator for dispatch video commands on two BSD rings */
1210	int bsd_ring_dispatch_index;
1211
1212	/** Bit 6 swizzling required for X tiling */
1213	uint32_t bit_6_swizzle_x;
1214	/** Bit 6 swizzling required for Y tiling */
1215	uint32_t bit_6_swizzle_y;
1216
1217	/* accounting, useful for userland debugging */
1218	spinlock_t object_stat_lock;
1219	size_t object_memory;
1220	u32 object_count;
1221};
1222
1223struct drm_i915_error_state_buf {
1224	struct drm_i915_private *i915;
1225	unsigned bytes;
1226	unsigned size;
1227	int err;
1228	u8 *buf;
1229	loff_t start;
1230	loff_t pos;
1231};
1232
1233struct i915_error_state_file_priv {
1234	struct drm_device *dev;
1235	struct drm_i915_error_state *error;
1236};
1237
1238struct i915_gpu_error {
1239	/* For hangcheck timer */
1240#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
1241#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
1242	/* Hang gpu twice in this window and your context gets banned */
1243#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
1244
1245	struct workqueue_struct *hangcheck_wq;
1246	struct delayed_work hangcheck_work;
1247
1248	/* For reset and error_state handling. */
1249	spinlock_t lock;
1250	/* Protected by the above dev->gpu_error.lock. */
1251	struct drm_i915_error_state *first_error;
1252
1253	unsigned long missed_irq_rings;
1254
1255	/**
1256	 * State variable controlling the reset flow and count
1257	 *
1258	 * This is a counter which gets incremented when reset is triggered,
1259	 * and again when reset has been handled. So odd values (lowest bit set)
	 * mean that a reset is in progress and even values mean that the
1261	 * (reset_counter >> 1):th reset was successfully completed.
1262	 *
	 * If reset is not completed successfully, the I915_WEDGED bit is
	 * set, meaning that the hardware is terminally sour and there is no
1265	 * recovery. All waiters on the reset_queue will be woken when
1266	 * that happens.
1267	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and it needs to restart the entire ioctl (since most
1270	 * likely the seqno it waited for won't ever signal anytime soon).
1271	 *
1272	 * This is important for lock-free wait paths, where no contended lock
1273	 * naturally enforces the correct ordering between the bail-out of the
1274	 * waiter and the gpu reset work code.
1275	 */
1276	atomic_t reset_counter;
1277
1278#define I915_RESET_IN_PROGRESS_FLAG	1
1279#define I915_WEDGED			(1 << 31)
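
	/*
	 * Worked example (illustrative): reset_counter == 5 has the low bit
	 * set, so a reset is in progress and 5 >> 1 == 2 resets completed
	 * before it; reset_counter == 6 means three resets have completed
	 * and none is currently pending.
	 */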
1280
1281	/**
1282	 * Waitqueue to signal when the reset has completed. Used by clients
1283	 * that wait for dev_priv->mm.wedged to settle.
1284	 */
1285	wait_queue_head_t reset_queue;
1286
1287	/* Userspace knobs for gpu hang simulation;
1288	 * combines both a ring mask, and extra flags
1289	 */
1290	u32 stop_rings;
1291#define I915_STOP_RING_ALLOW_BAN       (1 << 31)
1292#define I915_STOP_RING_ALLOW_WARN      (1 << 30)
1293
1294	/* For missed irq/seqno simulation. */
1295	unsigned int test_irq_rings;
1296
1297	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset   */
1298	bool reload_in_reset;
1299};
1300
1301enum modeset_restore {
1302	MODESET_ON_LID_OPEN,
1303	MODESET_DONE,
1304	MODESET_SUSPENDED,
1305};
1306
1307struct ddi_vbt_port_info {
1308	/*
1309	 * This is an index in the HDMI/DVI DDI buffer translation table.
1310	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
1311	 * populate this field.
1312	 */
1313#define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
1314	uint8_t hdmi_level_shift;
1315
1316	uint8_t supports_dvi:1;
1317	uint8_t supports_hdmi:1;
1318	uint8_t supports_dp:1;
1319};
1320
1321enum psr_lines_to_wait {
1322	PSR_0_LINES_TO_WAIT = 0,
1323	PSR_1_LINE_TO_WAIT,
1324	PSR_4_LINES_TO_WAIT,
1325	PSR_8_LINES_TO_WAIT
1326};
1327
1328struct intel_vbt_data {
1329	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
1330	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
1331
1332	/* Feature bits */
1333	unsigned int int_tv_support:1;
1334	unsigned int lvds_dither:1;
1335	unsigned int lvds_vbt:1;
1336	unsigned int int_crt_support:1;
1337	unsigned int lvds_use_ssc:1;
1338	unsigned int display_clock_mode:1;
1339	unsigned int fdi_rx_polarity_inverted:1;
1340	unsigned int has_mipi:1;
1341	int lvds_ssc_freq;
1342	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
1343
1344	enum drrs_support_type drrs_type;
1345
1346	/* eDP */
1347	int edp_rate;
1348	int edp_lanes;
1349	int edp_preemphasis;
1350	int edp_vswing;
1351	bool edp_initialized;
1352	bool edp_support;
1353	int edp_bpp;
1354	bool edp_low_vswing;
1355	struct edp_power_seq edp_pps;
1356
1357	struct {
1358		bool full_link;
1359		bool require_aux_wakeup;
1360		int idle_frames;
1361		enum psr_lines_to_wait lines_to_wait;
1362		int tp1_wakeup_time;
1363		int tp2_tp3_wakeup_time;
1364	} psr;
1365
1366	struct {
1367		u16 pwm_freq_hz;
1368		bool present;
1369		bool active_low_pwm;
1370		u8 min_brightness;	/* min_brightness/255 of max */
1371	} backlight;
1372
1373	/* MIPI DSI */
1374	struct {
1375		u16 port;
1376		u16 panel_id;
1377		struct mipi_config *config;
1378		struct mipi_pps_data *pps;
1379		u8 seq_version;
1380		u32 size;
1381		u8 *data;
1382		u8 *sequence[MIPI_SEQ_MAX];
1383	} dsi;
1384
1385	int crt_ddc_pin;
1386
1387	int child_dev_num;
1388	union child_device_config *child_dev;
1389
1390	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
1391};
1392
1393enum intel_ddb_partitioning {
1394	INTEL_DDB_PART_1_2,
1395	INTEL_DDB_PART_5_6, /* IVB+ */
1396};
1397
1398struct intel_wm_level {
1399	bool enable;
1400	uint32_t pri_val;
1401	uint32_t spr_val;
1402	uint32_t cur_val;
1403	uint32_t fbc_val;
1404};
1405
1406struct ilk_wm_values {
1407	uint32_t wm_pipe[3];
1408	uint32_t wm_lp[3];
1409	uint32_t wm_lp_spr[3];
1410	uint32_t wm_linetime[3];
1411	bool enable_fbc_wm;
1412	enum intel_ddb_partitioning partitioning;
1413};
1414
1415struct vlv_wm_values {
1416	struct {
1417		uint16_t primary;
1418		uint16_t sprite[2];
1419		uint8_t cursor;
1420	} pipe[3];
1421
1422	struct {
1423		uint16_t plane;
1424		uint8_t cursor;
1425	} sr;
1426
1427	struct {
1428		uint8_t cursor;
1429		uint8_t sprite[2];
1430		uint8_t primary;
1431	} ddl[3];
1432};
1433
1434struct skl_ddb_entry {
1435	uint16_t start, end;	/* in number of blocks, 'end' is exclusive */
1436};
1437
1438static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
1439{
1440	return entry->end - entry->start;
1441}
1442
1443static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
1444				       const struct skl_ddb_entry *e2)
1445{
1446	if (e1->start == e2->start && e1->end == e2->end)
1447		return true;
1448
1449	return false;
1450}
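
/*
 * Example (illustrative): an entry with start == 0 and end == 16 covers DDB
 * blocks 0..15, so skl_ddb_entry_size() returns 16; since 'end' is
 * exclusive, two entries touch without overlapping when e1->end == e2->start.
 */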
1451
1452struct skl_ddb_allocation {
1453	struct skl_ddb_entry pipe[I915_MAX_PIPES];
1454	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
1455	struct skl_ddb_entry cursor[I915_MAX_PIPES];
1456};
1457
1458struct skl_wm_values {
1459	bool dirty[I915_MAX_PIPES];
1460	struct skl_ddb_allocation ddb;
1461	uint32_t wm_linetime[I915_MAX_PIPES];
1462	uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
1463	uint32_t cursor[I915_MAX_PIPES][8];
1464	uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
1465	uint32_t cursor_trans[I915_MAX_PIPES];
1466};
1467
1468struct skl_wm_level {
1469	bool plane_en[I915_MAX_PLANES];
1470	bool cursor_en;
1471	uint16_t plane_res_b[I915_MAX_PLANES];
1472	uint8_t plane_res_l[I915_MAX_PLANES];
1473	uint16_t cursor_res_b;
1474	uint8_t cursor_res_l;
1475};
1476
1477/*
1478 * This struct helps tracking the state needed for runtime PM, which puts the
1479 * device in PCI D3 state. Notice that when this happens, nothing on the
1480 * graphics device works, even register access, so we don't get interrupts nor
1481 * anything else.
1482 *
1483 * Every piece of our code that needs to actually touch the hardware needs to
1484 * either call intel_runtime_pm_get or call intel_display_power_get with the
1485 * appropriate power domain.
1486 *
1487 * Our driver uses the autosuspend delay feature, which means we'll only really
1488 * suspend if we stay with zero refcount for a certain amount of time. The
1489 * default value is currently very conservative (see intel_runtime_pm_enable), but
1490 * it can be changed with the standard runtime PM files from sysfs.
1491 *
 * The irqs_enabled variable becomes false exactly after we disable the IRQs and
 * goes back to true exactly before we reenable them. We use this variable
1494 * to check if someone is trying to enable/disable IRQs while they're supposed
1495 * to be disabled. This shouldn't happen and we'll print some error messages in
1496 * case it happens.
1497 *
1498 * For more, read the Documentation/power/runtime_pm.txt.
1499 */
1500struct i915_runtime_pm {
1501	bool suspended;
1502	bool irqs_enabled;
1503};
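
/*
 * Illustrative pattern (a sketch): code that needs the hardware awake
 * brackets its access with a runtime PM or display power reference, e.g.
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... register access ...
 *	intel_runtime_pm_put(dev_priv);
 *
 * or intel_display_power_get()/intel_display_power_put() with the
 * appropriate enum intel_display_power_domain.
 */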
1504
1505enum intel_pipe_crc_source {
1506	INTEL_PIPE_CRC_SOURCE_NONE,
1507	INTEL_PIPE_CRC_SOURCE_PLANE1,
1508	INTEL_PIPE_CRC_SOURCE_PLANE2,
1509	INTEL_PIPE_CRC_SOURCE_PF,
1510	INTEL_PIPE_CRC_SOURCE_PIPE,
1511	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
1512	INTEL_PIPE_CRC_SOURCE_TV,
1513	INTEL_PIPE_CRC_SOURCE_DP_B,
1514	INTEL_PIPE_CRC_SOURCE_DP_C,
1515	INTEL_PIPE_CRC_SOURCE_DP_D,
1516	INTEL_PIPE_CRC_SOURCE_AUTO,
1517	INTEL_PIPE_CRC_SOURCE_MAX,
1518};
1519
1520struct intel_pipe_crc_entry {
1521	uint32_t frame;
1522	uint32_t crc[5];
1523};
1524
1525#define INTEL_PIPE_CRC_ENTRIES_NR	128
1526struct intel_pipe_crc {
1527	spinlock_t lock;
1528	bool opened;		/* exclusive access to the result file */
1529	struct intel_pipe_crc_entry *entries;
1530	enum intel_pipe_crc_source source;
1531	int head, tail;
1532	wait_queue_head_t wq;
1533};
1534
1535struct i915_frontbuffer_tracking {
1536	struct mutex lock;
1537
1538	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
1540	 * scheduled flips.
1541	 */
1542	unsigned busy_bits;
1543	unsigned flip_bits;
1544};
1545
1546struct i915_wa_reg {
1547	u32 addr;
1548	u32 value;
1549	/* bitmask representing WA bits */
1550	u32 mask;
1551};
1552
1553#define I915_MAX_WA_REGS 16
1554
1555struct i915_workarounds {
1556	struct i915_wa_reg reg[I915_MAX_WA_REGS];
1557	u32 count;
1558};
1559
1560struct i915_virtual_gpu {
1561	bool active;
1562};
1563
1564struct drm_i915_private {
1565	struct drm_device *dev;
1566	struct kmem_cache *slab;
1567
1568	const struct intel_device_info info;
1569
1570	int relative_constants_mode;
1571
1572	void __iomem *regs;
1573
1574	struct intel_uncore uncore;
1575
1576	struct i915_virtual_gpu vgpu;
1577
1578	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
1579
1580
1581	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
1582	 * controller on different i2c buses. */
1583	struct mutex gmbus_mutex;
1584
1585	/**
1586	 * Base address of the gmbus and gpio block.
1587	 */
1588	uint32_t gpio_mmio_base;
1589
1590	/* MMIO base address for MIPI regs */
1591	uint32_t mipi_mmio_base;
1592
1593	wait_queue_head_t gmbus_wait_queue;
1594
1595	struct pci_dev *bridge_dev;
1596	struct intel_engine_cs ring[I915_NUM_RINGS];
1597	struct drm_i915_gem_object *semaphore_obj;
1598	uint32_t last_seqno, next_seqno;
1599
1600	struct drm_dma_handle *status_page_dmah;
1601	struct resource mch_res;
1602
1603	/* protects the irq masks */
1604	spinlock_t irq_lock;
1605
1606	/* protects the mmio flip data */
1607	spinlock_t mmio_flip_lock;
1608
1609	bool display_irqs_enabled;
1610
1611	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
1612	struct pm_qos_request pm_qos;
1613
1614	/* DPIO indirect register protection */
1615	struct mutex dpio_lock;
1616
1617	/** Cached value of IMR to avoid reads in updating the bitfield */
1618	union {
1619		u32 irq_mask;
1620		u32 de_irq_mask[I915_MAX_PIPES];
1621	};
1622	u32 gt_irq_mask;
1623	u32 pm_irq_mask;
1624	u32 pm_rps_events;
1625	u32 pipestat_irq_mask[I915_MAX_PIPES];
1626
1627	struct work_struct hotplug_work;
1628	struct {
1629		unsigned long hpd_last_jiffies;
1630		int hpd_cnt;
1631		enum {
1632			HPD_ENABLED = 0,
1633			HPD_DISABLED = 1,
1634			HPD_MARK_DISABLED = 2
1635		} hpd_mark;
1636	} hpd_stats[HPD_NUM_PINS];
1637	u32 hpd_event_bits;
1638	struct delayed_work hotplug_reenable_work;
1639
1640	struct i915_fbc fbc;
1641	struct i915_drrs drrs;
1642	struct intel_opregion opregion;
1643	struct intel_vbt_data vbt;
1644
1645	bool preserve_bios_swizzle;
1646
1647	/* overlay */
1648	struct intel_overlay *overlay;
1649
1650	/* backlight registers and fields in struct intel_panel */
1651	struct mutex backlight_lock;
1652
1653	/* LVDS info */
1654	bool no_aux_handshake;
1655
1656	/* protects panel power sequencer state */
1657	struct mutex pps_mutex;
1658
1659	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
1660	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
1661	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1662
1663	unsigned int fsb_freq, mem_freq, is_ddr3;
1664	unsigned int vlv_cdclk_freq;
1665	unsigned int hpll_freq;
1666
1667	/**
1668	 * wq - Driver workqueue for GEM.
1669	 *
1670	 * NOTE: Work items scheduled here are not allowed to grab any modeset
1671	 * locks, for otherwise the flushing done in the pageflip code will
1672	 * result in deadlocks.
1673	 */
1674	struct workqueue_struct *wq;
1675
1676	/* Display functions */
1677	struct drm_i915_display_funcs display;
1678
1679	/* PCH chipset type */
1680	enum intel_pch pch_type;
1681	unsigned short pch_id;
1682
1683	unsigned long quirks;
1684
1685	enum modeset_restore modeset_restore;
1686	struct mutex modeset_restore_lock;
1687
1688	struct list_head vm_list; /* Global list of all address spaces */
1689	struct i915_gtt gtt; /* VM representing the global address space */
1690
1691	struct i915_gem_mm mm;
1692	DECLARE_HASHTABLE(mm_structs, 7);
1693	struct mutex mm_lock;
1694
1695	/* Kernel Modesetting */
1696
1697	struct sdvo_device_mapping sdvo_mappings[2];
1698
1699	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
1700	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
1701	wait_queue_head_t pending_flip_queue;
1702
1703#ifdef CONFIG_DEBUG_FS
1704	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
1705#endif
1706
1707	int num_shared_dpll;
1708	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
1709	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
1710
1711	struct i915_workarounds workarounds;
1712
1713	/* Reclocking support */
1714	bool render_reclock_avail;
1715	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */
1717	int lvds_downclock;
1718
1719	struct i915_frontbuffer_tracking fb_tracking;
1720
1721	u16 orig_clock;
1722
1723	bool mchbar_need_disable;
1724
1725	struct intel_l3_parity l3_parity;
1726
1727	/* Cannot be determined by PCIID. You must always read a register. */
1728	size_t ellc_size;
1729
1730	/* gen6+ rps state */
1731	struct intel_gen6_power_mgmt rps;
1732
1733	/* ilk-only ips/rps state. Everything in here is protected by the global
1734	 * mchdev_lock in intel_pm.c */
1735	struct intel_ilk_power_mgmt ips;
1736
1737	struct i915_power_domains power_domains;
1738
1739	struct i915_psr psr;
1740
1741	struct i915_gpu_error gpu_error;
1742
1743	struct drm_i915_gem_object *vlv_pctx;
1744
1745#ifdef CONFIG_DRM_I915_FBDEV
	/* the fbdev registered on this device */
1747	struct intel_fbdev *fbdev;
1748	struct work_struct fbdev_suspend_work;
1749#endif
1750
1751	struct drm_property *broadcast_rgb_property;
1752	struct drm_property *force_audio_property;
1753
1754	/* hda/i915 audio component */
1755	bool audio_component_registered;
1756
1757	uint32_t hw_context_size;
1758	struct list_head context_list;
1759
1760	u32 fdi_rx_config;
1761
1762	u32 suspend_count;
1763	struct i915_suspend_saved_registers regfile;
1764	struct vlv_s0ix_state vlv_s0ix_state;
1765
1766	struct {
1767		/*
1768		 * Raw watermark latency values:
1769		 * in 0.1us units for WM0,
1770		 * in 0.5us units for WM1+.
1771		 */
1772		/* primary */
1773		uint16_t pri_latency[5];
1774		/* sprite */
1775		uint16_t spr_latency[5];
1776		/* cursor */
1777		uint16_t cur_latency[5];
1778		/*
1779		 * Raw watermark memory latency values
1780		 * for SKL for all 8 levels
1781		 * in 1us units.
1782		 */
1783		uint16_t skl_latency[8];
1784
1785		/*
1786		 * The skl_wm_values structure is a bit too big for stack
1787		 * allocation, so we keep the staging struct where we store
1788		 * intermediate results here instead.
1789		 */
1790		struct skl_wm_values skl_results;
1791
1792		/* current hardware state */
1793		union {
1794			struct ilk_wm_values hw;
1795			struct skl_wm_values skl_hw;
1796			struct vlv_wm_values vlv;
1797		};
1798	} wm;
1799
1800	struct i915_runtime_pm pm;
1801
1802	struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
1803	u32 long_hpd_port_mask;
1804	u32 short_hpd_port_mask;
1805	struct work_struct dig_port_work;
1806
1807	/*
	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
	 * non-DP HPD work could block the workqueue while trying to grab a
	 * mode config mutex that userspace may already hold. Userspace,
	 * however, may be waiting on the DP workqueue to run, which is then
	 * blocked behind the non-DP one. Hence the separate workqueue for DP.
1813	 */
1814	struct workqueue_struct *dp_wq;
1815
1816	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
1817	struct {
1818		int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
1819				  struct intel_engine_cs *ring,
1820				  struct intel_context *ctx,
1821				  struct drm_i915_gem_execbuffer2 *args,
1822				  struct list_head *vmas,
1823				  struct drm_i915_gem_object *batch_obj,
1824				  u64 exec_start, u32 flags);
1825		int (*init_rings)(struct drm_device *dev);
1826		void (*cleanup_ring)(struct intel_engine_cs *ring);
1827		void (*stop_ring)(struct intel_engine_cs *ring);
1828	} gt;
1829
1830	uint32_t request_uniq;
1831
1832	/*
1833	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
1834	 * will be rejected. Instead look for a better place.
1835	 */
1836};
1837
1838static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
1839{
1840	return dev->dev_private;
1841}
1842
1843static inline struct drm_i915_private *dev_to_i915(struct device *dev)
1844{
1845	return to_i915(dev_get_drvdata(dev));
1846}
1847
1848/* Iterate over initialised rings */
1849#define for_each_ring(ring__, dev_priv__, i__) \
1850	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
1851		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
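
/*
 * Illustrative use (a sketch):
 *
 *	struct intel_engine_cs *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		DRM_DEBUG("ring %d: %s\n", i, ring->name);
 */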
1852
1853enum hdmi_force_audio {
1854	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
1855	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
1856	HDMI_AUDIO_AUTO,		/* trust EDID */
1857	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
1858};
1859
1860#define I915_GTT_OFFSET_NONE ((u32)-1)
1861
1862struct drm_i915_gem_object_ops {
1863	/* Interface between the GEM object and its backing storage.
1864	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
1866	 * called after we no longer need them. As we expect there to be
1867	 * associated cost with migrating pages between the backing storage
1868	 * and making them available for the GPU (e.g. clflush), we may hold
1869	 * onto the pages after they are no longer referenced by the GPU
1870	 * in case they may be used again shortly (for example migrating the
1871	 * pages to a different memory domain within the GTT). put_pages()
1872	 * will therefore most likely be called when the object itself is
1873	 * being released or under memory pressure (where we attempt to
1874	 * reap pages for the shrinker).
1875	 */
1876	int (*get_pages)(struct drm_i915_gem_object *);
1877	void (*put_pages)(struct drm_i915_gem_object *);
1878	int (*dmabuf_export)(struct drm_i915_gem_object *);
1879	void (*release)(struct drm_i915_gem_object *);
1880};
1881
1882/*
1883 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
 * considered to be the frontbuffer for the given plane interface-wise. This
1885 * doesn't mean that the hw necessarily already scans it out, but that any
1886 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
1887 *
1888 * We have one bit per pipe and per scanout plane type.
1889 */
1890#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
1891#define INTEL_FRONTBUFFER_BITS \
1892	(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
1893#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
1894	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
1895#define INTEL_FRONTBUFFER_CURSOR(pipe) \
1896	(1 << (1 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
1897#define INTEL_FRONTBUFFER_SPRITE(pipe) \
1898	(1 << (2 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
1899#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
1900	(1 << (3 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
1901#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
1902	(0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
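
/*
 * Worked example of the layout above: with 4 bits per pipe, pipe B (index 1)
 * starts at bit 4, so INTEL_FRONTBUFFER_PRIMARY(PIPE_B) == 1 << 4,
 * INTEL_FRONTBUFFER_CURSOR(PIPE_B) == 1 << 5 and
 * INTEL_FRONTBUFFER_ALL_MASK(PIPE_B) == 0xf << 4 == 0xf0.
 */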
1903
1904struct drm_i915_gem_object {
1905	struct drm_gem_object base;
1906
1907	const struct drm_i915_gem_object_ops *ops;
1908
1909	/** List of VMAs backed by this object */
1910	struct list_head vma_list;
1911
1912	/** Stolen memory for this object, instead of being backed by shmem. */
1913	struct drm_mm_node *stolen;
1914	struct list_head global_list;
1915
1916	struct list_head ring_list;
1917	/** Used in execbuf to temporarily hold a ref */
1918	struct list_head obj_exec_link;
1919
1920	struct list_head batch_pool_list;
1921
1922	/**
1923	 * This is set if the object is on the active lists (has pending
	 * rendering and so a non-zero seqno), and is not set if it is on the
	 * inactive (ready to be unbound) list.
1926	 */
1927	unsigned int active:1;
1928
1929	/**
1930	 * This is set if the object has been written to since last bound
1931	 * to the GTT
1932	 */
1933	unsigned int dirty:1;
1934
1935	/**
1936	 * Fence register bits (if any) for this object.  Will be set
1937	 * as needed when mapped into the GTT.
1938	 * Protected by dev->struct_mutex.
1939	 */
1940	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
1941
1942	/**
1943	 * Advice: are the backing pages purgeable?
1944	 */
1945	unsigned int madv:2;
1946
1947	/**
1948	 * Current tiling mode for the object.
1949	 */
1950	unsigned int tiling_mode:2;
1951	/**
1952	 * Whether the tiling parameters for the currently associated fence
1953	 * register have changed. Note that for the purposes of tracking
1954	 * tiling changes we also treat the unfenced register, the register
1955	 * slot that the object occupies whilst it executes a fenced
1956	 * command (such as BLT on gen2/3), as a "fence".
1957	 */
1958	unsigned int fence_dirty:1;
1959
1960	/**
1961	 * Is the object at the current location in the gtt mappable and
1962	 * fenceable? Used to avoid costly recalculations.
1963	 */
1964	unsigned int map_and_fenceable:1;
1965
1966	/**
1967	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separately for a more
1969	 * accurate mappable working set.
1970	 */
1971	unsigned int fault_mappable:1;
1972	unsigned int pin_mappable:1;
1973	unsigned int pin_display:1;
1974
1975	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if the hardware has the relevant pte bit.
1978	 */
1979	unsigned long gt_ro:1;
1980	unsigned int cache_level:3;
1981	unsigned int cache_dirty:1;
1982
1983	unsigned int has_dma_mapping:1;
1984
1985	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
1986
1987	struct sg_table *pages;
1988	int pages_pin_count;
1989
1990	/* prime dma-buf support */
1991	void *dma_buf_vmapping;
1992	int vmapping_count;
1993
1994	/** Breadcrumb of last rendering to the buffer. */
1995	struct drm_i915_gem_request *last_read_req;
1996	struct drm_i915_gem_request *last_write_req;
1997	/** Breadcrumb of last fenced GPU access to the buffer. */
1998	struct drm_i915_gem_request *last_fenced_req;
1999
2000	/** Current tiling stride for the object, if it's tiled. */
2001	uint32_t stride;
2002
2003	/** References from framebuffers, locks out tiling changes. */
2004	unsigned long framebuffer_references;
2005
2006	/** Record of address bit 17 of each page at last unbind. */
2007	unsigned long *bit_17;
2008
2009	union {
2010		/** for phy allocated objects */
2011		struct drm_dma_handle *phys_handle;
2012
2013		struct i915_gem_userptr {
2014			uintptr_t ptr;
2015			unsigned read_only :1;
2016			unsigned workers :4;
2017#define I915_GEM_USERPTR_MAX_WORKERS 15
2018
2019			struct i915_mm_struct *mm;
2020			struct i915_mmu_object *mmu_object;
2021			struct work_struct *work;
2022		} userptr;
2023	};
2024};
2025#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
2026
2027void i915_gem_track_fb(struct drm_i915_gem_object *old,
2028		       struct drm_i915_gem_object *new,
2029		       unsigned frontbuffer_bits);
2030
2031/**
2032 * Request queue structure.
2033 *
2034 * The request queue allows us to note sequence numbers that have been emitted
2035 * and may be associated with active buffers to be retired.
2036 *
2037 * By keeping this list, we can avoid having to do questionable sequence
2038 * number comparisons on buffer last_read|write_seqno. It also allows an
2039 * emission time to be associated with the request for tracking how far ahead
2040 * of the GPU the submission is.
2041 *
2042 * The requests are reference counted, so upon creation they should have an
 * initial reference taken using kref_init().
2044 */
2045struct drm_i915_gem_request {
2046	struct kref ref;
2047
	/** On which ring this request was generated */
2049	struct intel_engine_cs *ring;
2050
2051	/** GEM sequence number associated with this request. */
2052	uint32_t seqno;
2053
2054	/** Position in the ringbuffer of the start of the request */
2055	u32 head;
2056
2057	/**
2058	 * Position in the ringbuffer of the start of the postfix.
2059	 * This is required to calculate the maximum available ringbuffer
2060	 * space without overwriting the postfix.
2061	 */
	u32 postfix;
2063
2064	/** Position in the ringbuffer of the end of the whole request */
2065	u32 tail;
2066
2067	/**
2068	 * Context and ring buffer related to this request
2069	 * Contexts are refcounted, so when this request is associated with a
2070	 * context, we must increment the context's refcount, to guarantee that
2071	 * it persists while any request is linked to it. Requests themselves
2072	 * are also refcounted, so the request will only be freed when the last
2073	 * reference to it is dismissed, and the code in
2074	 * i915_gem_request_free() will then decrement the refcount on the
2075	 * context.
2076	 */
2077	struct intel_context *ctx;
2078	struct intel_ringbuffer *ringbuf;
2079
2080	/** Batch buffer related to this request if any */
2081	struct drm_i915_gem_object *batch_obj;
2082
2083	/** Time at which this request was emitted, in jiffies. */
2084	unsigned long emitted_jiffies;
2085
2086	/** global list entry for this request */
2087	struct list_head list;
2088
2089	struct drm_i915_file_private *file_priv;
2090	/** file_priv list entry for this request */
2091	struct list_head client_list;
2092
2093	/** process identifier submitting this request */
2094	struct pid *pid;
2095
2096	uint32_t uniq;
2097
2098	/**
2099	 * The ELSP only accepts two elements at a time, so we queue
2100	 * context/tail pairs on a given queue (ring->execlist_queue) until the
2101	 * hardware is available. The queue serves a double purpose: we also use
2102	 * it to keep track of the up to 2 contexts currently in the hardware
2103	 * (usually one in execution and the other queued up by the GPU): We
2104	 * only remove elements from the head of the queue when the hardware
2105	 * informs us that an element has been completed.
2106	 *
2107	 * All accesses to the queue are mediated by a spinlock
2108	 * (ring->execlist_lock).
2109	 */
2110
2111	/** Execlist link in the submission queue.*/
2112	struct list_head execlist_link;
2113
2114	/** Execlists no. of times this request has been sent to the ELSP */
2115	int elsp_submitted;
2116
2117};
2118
2119void i915_gem_request_free(struct kref *req_ref);
2120
2121static inline uint32_t
2122i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
2123{
2124	return req ? req->seqno : 0;
2125}
2126
2127static inline struct intel_engine_cs *
2128i915_gem_request_get_ring(struct drm_i915_gem_request *req)
2129{
2130	return req ? req->ring : NULL;
2131}
2132
2133static inline void
2134i915_gem_request_reference(struct drm_i915_gem_request *req)
2135{
2136	kref_get(&req->ref);
2137}
2138
2139static inline void
2140i915_gem_request_unreference(struct drm_i915_gem_request *req)
2141{
2142	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
2143	kref_put(&req->ref, i915_gem_request_free);
2144}
2145
2146static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
2147					   struct drm_i915_gem_request *src)
2148{
2149	if (src)
2150		i915_gem_request_reference(src);
2151
2152	if (*pdst)
2153		i915_gem_request_unreference(*pdst);
2154
2155	*pdst = src;
2156}
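
/*
 * Example use of i915_gem_request_assign() (a sketch): replace the request
 * tracked in obj->last_write_req with a newer one, taking a reference on the
 * new request and dropping the reference on the old one in a single step:
 *
 *	i915_gem_request_assign(&obj->last_write_req, req);
 *
 * Passing NULL as the source simply drops whatever request was tracked.
 */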
2157
2158/*
2159 * XXX: i915_gem_request_completed should be here but currently needs the
2160 * definition of i915_seqno_passed() which is below. It will be moved in
2161 * a later patch when the call to i915_seqno_passed() is obsoleted...
2162 */
2163
2164struct drm_i915_file_private {
2165	struct drm_i915_private *dev_priv;
2166	struct drm_file *file;
2167
2168	struct {
2169		spinlock_t lock;
2170		struct list_head request_list;
2171		struct delayed_work idle_work;
2172	} mm;
2173	struct idr context_idr;
2174
2175	atomic_t rps_wait_boost;
2176	struct  intel_engine_cs *bsd_ring;
2177};
2178
2179/*
2180 * A command that requires special handling by the command parser.
2181 */
2182struct drm_i915_cmd_descriptor {
2183	/*
2184	 * Flags describing how the command parser processes the command.
2185	 *
2186	 * CMD_DESC_FIXED: The command has a fixed length if this is set,
2187	 *                 a length mask if not set
2188	 * CMD_DESC_SKIP: The command is allowed but does not follow the
2189	 *                standard length encoding for the opcode range in
2190	 *                which it falls
2191	 * CMD_DESC_REJECT: The command is never allowed
2192	 * CMD_DESC_REGISTER: The command should be checked against the
2193	 *                    register whitelist for the appropriate ring
2194	 * CMD_DESC_MASTER: The command is allowed if the submitting process
2195	 *                  is the DRM master
2196	 */
2197	u32 flags;
2198#define CMD_DESC_FIXED    (1<<0)
2199#define CMD_DESC_SKIP     (1<<1)
2200#define CMD_DESC_REJECT   (1<<2)
2201#define CMD_DESC_REGISTER (1<<3)
2202#define CMD_DESC_BITMASK  (1<<4)
2203#define CMD_DESC_MASTER   (1<<5)
2204
2205	/*
2206	 * The command's unique identification bits and the bitmask to get them.
2207	 * This isn't strictly the opcode field as defined in the spec and may
2208	 * also include type, subtype, and/or subop fields.
2209	 */
2210	struct {
2211		u32 value;
2212		u32 mask;
2213	} cmd;
2214
2215	/*
2216	 * The command's length. The command is either fixed length (i.e. does
2217	 * not include a length field) or has a length field mask. The flag
2218	 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
2219	 * a length mask. All command entries in a command table must include
2220	 * length information.
2221	 */
2222	union {
2223		u32 fixed;
2224		u32 mask;
2225	} length;
2226
2227	/*
2228	 * Describes where to find a register address in the command to check
2229	 * against the ring's register whitelist. Only valid if flags has the
2230	 * CMD_DESC_REGISTER bit set.
2231	 */
2232	struct {
2233		u32 offset;
2234		u32 mask;
2235	} reg;
2236
2237#define MAX_CMD_DESC_BITMASKS 3
2238	/*
2239	 * Describes command checks where a particular dword is masked and
2240	 * compared against an expected value. If the command does not match
2241	 * the expected value, the parser rejects it. Only valid if flags has
2242	 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
2243	 * are valid.
2244	 *
2245	 * If the check specifies a non-zero condition_mask then the parser
2246	 * only performs the check when the bits specified by condition_mask
2247	 * are non-zero.
2248	 */
2249	struct {
2250		u32 offset;
2251		u32 mask;
2252		u32 expected;
2253		u32 condition_offset;
2254		u32 condition_mask;
2255	} bits[MAX_CMD_DESC_BITMASKS];
2256};
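
/*
 * Hypothetical descriptor entry (illustrative only; the real tables live in
 * i915_cmd_parser.c): a fixed-length, two-dword command whose opcode sits in
 * the masked bits of dword 0 and whose register operand in dword 1 must be
 * checked against the ring's whitelist:
 *
 *	{
 *		.flags = CMD_DESC_FIXED | CMD_DESC_REGISTER,
 *		.cmd = { .value = 0x12 << 23, .mask = 0x1ff << 23 },
 *		.length = { .fixed = 2 },
 *		.reg = { .offset = 1, .mask = ~0x3 },
 *	}
 */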
2257
2258/*
2259 * A table of commands requiring special handling by the command parser.
2260 *
2261 * Each ring has an array of tables. Each table consists of an array of command
2262 * descriptors, which must be sorted with command opcodes in ascending order.
2263 */
2264struct drm_i915_cmd_table {
2265	const struct drm_i915_cmd_descriptor *table;
2266	int count;
2267};
2268
2269/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
2270#define __I915__(p) ({ \
2271	struct drm_i915_private *__p; \
2272	if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
2273		__p = (struct drm_i915_private *)p; \
2274	else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
2275		__p = to_i915((struct drm_device *)p); \
2276	else \
2277		BUILD_BUG(); \
2278	__p; \
2279})
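/*
 * __I915__() lets the INTEL_*()/IS_*()/HAS_*() macros below accept either a
 * struct drm_device * or a struct drm_i915_private * transparently, e.g.
 * (a sketch):
 *
 *	if (INTEL_INFO(dev)->gen >= 8)		... with a drm_device * ...
 *	if (INTEL_INFO(dev_priv)->gen >= 8)	... with a drm_i915_private * ...
 *
 * Both expand to the same device-info lookup.
 */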
2280#define INTEL_INFO(p) 	(&__I915__(p)->info)
2281#define INTEL_DEVID(p)	(INTEL_INFO(p)->device_id)
2282#define INTEL_REVID(p)	(__I915__(p)->dev->pdev->revision)
2283
2284#define IS_I830(dev)		(INTEL_DEVID(dev) == 0x3577)
2285#define IS_845G(dev)		(INTEL_DEVID(dev) == 0x2562)
2286#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
2287#define IS_I865G(dev)		(INTEL_DEVID(dev) == 0x2572)
2288#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
2289#define IS_I915GM(dev)		(INTEL_DEVID(dev) == 0x2592)
2290#define IS_I945G(dev)		(INTEL_DEVID(dev) == 0x2772)
2291#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
2292#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
2293#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
2294#define IS_GM45(dev)		(INTEL_DEVID(dev) == 0x2A42)
2295#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
2296#define IS_PINEVIEW_G(dev)	(INTEL_DEVID(dev) == 0xa001)
2297#define IS_PINEVIEW_M(dev)	(INTEL_DEVID(dev) == 0xa011)
2298#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
2299#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
2300#define IS_IRONLAKE_M(dev)	(INTEL_DEVID(dev) == 0x0046)
2301#define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
2302#define IS_IVB_GT1(dev)		(INTEL_DEVID(dev) == 0x0156 || \
2303				 INTEL_DEVID(dev) == 0x0152 || \
2304				 INTEL_DEVID(dev) == 0x015a)
2305#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
2306#define IS_CHERRYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
2307#define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
2308#define IS_BROADWELL(dev)	(!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
2309#define IS_SKYLAKE(dev)	(INTEL_INFO(dev)->is_skylake)
2310#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
2311#define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
2312				 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
2313#define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
2314				 ((INTEL_DEVID(dev) & 0xf) == 0x6 ||	\
2315				 (INTEL_DEVID(dev) & 0xf) == 0xb ||	\
2316				 (INTEL_DEVID(dev) & 0xf) == 0xe))
2317#define IS_BDW_GT3(dev)		(IS_BROADWELL(dev) && \
2318				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
2319#define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
2320				 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
2321#define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
2322				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
2323/* ULX machines are also considered ULT. */
2324#define IS_HSW_ULX(dev)		(INTEL_DEVID(dev) == 0x0A0E || \
2325				 INTEL_DEVID(dev) == 0x0A1E)
2326#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
2327
2328#define SKL_REVID_A0		(0x0)
2329#define SKL_REVID_B0		(0x1)
2330#define SKL_REVID_C0		(0x2)
2331#define SKL_REVID_D0		(0x3)
2332#define SKL_REVID_E0		(0x4)
2333
2334/*
2335 * The genX designation typically refers to the render engine, so render
2336 * capability related checks should use IS_GEN, while display and other checks
2337 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
2338 * chips, etc.).
2339 */
2340#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
2341#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
2342#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
2343#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
2344#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
2345#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
2346#define IS_GEN8(dev)	(INTEL_INFO(dev)->gen == 8)
2347#define IS_GEN9(dev)	(INTEL_INFO(dev)->gen == 9)
2348
2349#define RENDER_RING		(1<<RCS)
2350#define BSD_RING		(1<<VCS)
2351#define BLT_RING		(1<<BCS)
2352#define VEBOX_RING		(1<<VECS)
2353#define BSD2_RING		(1<<VCS2)
2354#define HAS_BSD(dev)		(INTEL_INFO(dev)->ring_mask & BSD_RING)
2355#define HAS_BSD2(dev)		(INTEL_INFO(dev)->ring_mask & BSD2_RING)
2356#define HAS_BLT(dev)		(INTEL_INFO(dev)->ring_mask & BLT_RING)
2357#define HAS_VEBOX(dev)		(INTEL_INFO(dev)->ring_mask & VEBOX_RING)
2358#define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
2359#define HAS_WT(dev)		((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
2360				 __I915__(dev)->ellc_size)
2361#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
2362
2363#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
2364#define HAS_LOGICAL_RING_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 8)
2365#define USES_PPGTT(dev)		(i915.enable_ppgtt)
2366#define USES_FULL_PPGTT(dev)	(i915.enable_ppgtt == 2)
2367
2368#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
2369#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
2370
2371/* Early gen2 have a totally busted CS tlb and require pinned batches. */
2372#define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))
2373/*
 * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
2375 * even when in MSI mode. This results in spurious interrupt warnings if the
2376 * legacy irq no. is shared with another device. The kernel then disables that
2377 * interrupt source and so prevents the other device from working properly.
2378 */
2379#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
2380#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
2381
2382/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
2383 * rows, which changed the alignment requirements and fence programming.
2384 */
2385#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
2386						      IS_I915GM(dev)))
2387#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
2388#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
2389#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
2390#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
2391#define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
2392
2393#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
2394#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
2395#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
2396
2397#define HAS_IPS(dev)		(IS_HSW_ULT(dev) || IS_BROADWELL(dev))
2398
2399#define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
2400#define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
2401#define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev) || \
2402				 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
2403				 IS_SKYLAKE(dev))
2404#define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
2405				 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
2406#define HAS_RC6(dev)		(INTEL_INFO(dev)->gen >= 6)
2407#define HAS_RC6p(dev)		(INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
2408
2409#define INTEL_PCH_DEVICE_ID_MASK		0xff00
2410#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
2411#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
2412#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
2413#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
2414#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
2415#define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
2416#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
2417
2418#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
2419#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
2420#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
2421#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
2422#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
2423#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
2424#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
2425
2426#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
2427
2428/* DPF == dynamic parity feature */
2429#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
2430#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
2431
2432#define GT_FREQUENCY_MULTIPLIER 50
2433#define GEN9_FREQ_SCALER 3
2434
2435#include "i915_trace.h"
2436
2437extern const struct drm_ioctl_desc i915_ioctls[];
2438extern int i915_max_ioctl;
2439
2440extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state);
2441extern int i915_resume_legacy(struct drm_device *dev);
2442
2443/* i915_params.c */
2444struct i915_params {
2445	int modeset;
2446	int panel_ignore_lid;
2447	int semaphores;
2448	unsigned int lvds_downclock;
2449	int lvds_channel_mode;
2450	int panel_use_ssc;
2451	int vbt_sdvo_panel_type;
2452	int enable_rc6;
2453	int enable_fbc;
2454	int enable_ppgtt;
2455	int enable_execlists;
2456	int enable_psr;
2457	unsigned int preliminary_hw_support;
2458	int disable_power_well;
2459	int enable_ips;
2460	int invert_brightness;
2461	int enable_cmd_parser;
2462	/* leave bools at the end to not create holes */
2463	bool enable_hangcheck;
2464	bool fastboot;
2465	bool prefault_disable;
2466	bool load_detect_test;
2467	bool reset;
2468	bool disable_display;
2469	bool disable_vtd_wa;
2470	int use_mmio_flip;
2471	int mmio_debug;
2472	bool verbose_state_checks;
2473	bool nuclear_pageflip;
2474};
2475extern struct i915_params i915 __read_mostly;
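
/*
 * Module parameters are read through the global "i915" instance above, e.g.
 * (a sketch): a caller can gate a path on "if (i915.enable_execlists > 0)"
 * in the same way USES_PPGTT() keys off i915.enable_ppgtt earlier in this
 * header.
 */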
2476
/* i915_dma.c */
2478extern int i915_driver_load(struct drm_device *, unsigned long flags);
2479extern int i915_driver_unload(struct drm_device *);
2480extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
extern void i915_driver_lastclose(struct drm_device *dev);
2482extern void i915_driver_preclose(struct drm_device *dev,
2483				 struct drm_file *file);
2484extern void i915_driver_postclose(struct drm_device *dev,
2485				  struct drm_file *file);
extern int i915_driver_device_is_agp(struct drm_device *dev);
2487#ifdef CONFIG_COMPAT
2488extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
2489			      unsigned long arg);
2490#endif
2491extern int intel_gpu_reset(struct drm_device *dev);
2492extern int i915_reset(struct drm_device *dev);
2493extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
2494extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
2495extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
2496extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
2497int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
2498void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
2499
2500/* i915_irq.c */
2501void i915_queue_hangcheck(struct drm_device *dev);
2502__printf(3, 4)
2503void i915_handle_error(struct drm_device *dev, bool wedged,
2504		       const char *fmt, ...);
2505
2506extern void intel_irq_init(struct drm_i915_private *dev_priv);
2507extern void intel_hpd_init(struct drm_i915_private *dev_priv);
2508int intel_irq_install(struct drm_i915_private *dev_priv);
2509void intel_irq_uninstall(struct drm_i915_private *dev_priv);
2510
2511extern void intel_uncore_sanitize(struct drm_device *dev);
2512extern void intel_uncore_early_sanitize(struct drm_device *dev,
2513					bool restore_forcewake);
2514extern void intel_uncore_init(struct drm_device *dev);
2515extern void intel_uncore_check_errors(struct drm_device *dev);
2516extern void intel_uncore_fini(struct drm_device *dev);
2517extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
2518const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
2519void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
2520				enum forcewake_domains domains);
2521void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
2522				enum forcewake_domains domains);
2523void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
2524static inline bool intel_vgpu_active(struct drm_device *dev)
2525{
2526	return to_i915(dev)->vgpu.active;
2527}
2528
2529void
2530i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
2531		     u32 status_mask);
2532
2533void
2534i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
2535		      u32 status_mask);
2536
2537void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
2538void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
2539void
2540ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
2541void
2542ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
2543void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
2544				  uint32_t interrupt_mask,
2545				  uint32_t enabled_irq_mask);
2546#define ibx_enable_display_interrupt(dev_priv, bits) \
2547	ibx_display_interrupt_update((dev_priv), (bits), (bits))
2548#define ibx_disable_display_interrupt(dev_priv, bits) \
2549	ibx_display_interrupt_update((dev_priv), (bits), 0)
2550
2551/* i915_gem.c */
2552int i915_gem_create_ioctl(struct drm_device *dev, void *data,
2553			  struct drm_file *file_priv);
2554int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
2555			 struct drm_file *file_priv);
2556int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
2557			  struct drm_file *file_priv);
2558int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
2559			struct drm_file *file_priv);
2560int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2561			struct drm_file *file_priv);
2562int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
2563			      struct drm_file *file_priv);
2564int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
2565			     struct drm_file *file_priv);
2566void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
2567					struct intel_engine_cs *ring);
2568void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
2569					 struct drm_file *file,
2570					 struct intel_engine_cs *ring,
2571					 struct drm_i915_gem_object *obj);
2572int i915_gem_ringbuffer_submission(struct drm_device *dev,
2573				   struct drm_file *file,
2574				   struct intel_engine_cs *ring,
2575				   struct intel_context *ctx,
2576				   struct drm_i915_gem_execbuffer2 *args,
2577				   struct list_head *vmas,
2578				   struct drm_i915_gem_object *batch_obj,
2579				   u64 exec_start, u32 flags);
2580int i915_gem_execbuffer(struct drm_device *dev, void *data,
2581			struct drm_file *file_priv);
2582int i915_gem_execbuffer2(struct drm_device *dev, void *data,
2583			 struct drm_file *file_priv);
2584int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2585			struct drm_file *file_priv);
2586int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
2587			       struct drm_file *file);
2588int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
2589			       struct drm_file *file);
2590int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2591			    struct drm_file *file_priv);
2592int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
2593			   struct drm_file *file_priv);
2594int i915_gem_set_tiling(struct drm_device *dev, void *data,
2595			struct drm_file *file_priv);
2596int i915_gem_get_tiling(struct drm_device *dev, void *data,
2597			struct drm_file *file_priv);
2598int i915_gem_init_userptr(struct drm_device *dev);
2599int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
2600			   struct drm_file *file);
2601int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
2602				struct drm_file *file_priv);
2603int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
2604			struct drm_file *file_priv);
2605void i915_gem_load(struct drm_device *dev);
2606void *i915_gem_object_alloc(struct drm_device *dev);
2607void i915_gem_object_free(struct drm_i915_gem_object *obj);
2608void i915_gem_object_init(struct drm_i915_gem_object *obj,
2609			 const struct drm_i915_gem_object_ops *ops);
2610struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
2611						  size_t size);
2612void i915_init_vm(struct drm_i915_private *dev_priv,
2613		  struct i915_address_space *vm);
2614void i915_gem_free_object(struct drm_gem_object *obj);
2615void i915_gem_vma_destroy(struct i915_vma *vma);
2616
2617#define PIN_MAPPABLE 0x1
2618#define PIN_NONBLOCK 0x2
2619#define PIN_GLOBAL 0x4
2620#define PIN_OFFSET_BIAS 0x8
2621#define PIN_OFFSET_MASK (~4095)
2622int __must_check
2623i915_gem_object_pin(struct drm_i915_gem_object *obj,
2624		    struct i915_address_space *vm,
2625		    uint32_t alignment,
2626		    uint64_t flags);
2627int __must_check
2628i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
2629			 const struct i915_ggtt_view *view,
2630			 uint32_t alignment,
2631			 uint64_t flags);
2632
2633int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
2634		  u32 flags);
2635int __must_check i915_vma_unbind(struct i915_vma *vma);
2636int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
2637void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
2638void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
2639
2640int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
2641				    int *needs_clflush);
2642
2643int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
2644static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
2645{
2646	struct sg_page_iter sg_iter;
2647
2648	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
2649		return sg_page_iter_page(&sg_iter);
2650
2651	return NULL;
2652}
2653static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
2654{
2655	BUG_ON(obj->pages == NULL);
2656	obj->pages_pin_count++;
2657}
2658static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
2659{
2660	BUG_ON(obj->pages_pin_count == 0);
2661	obj->pages_pin_count--;
2662}
2663
2664int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
2665int i915_gem_object_sync(struct drm_i915_gem_object *obj,
2666			 struct intel_engine_cs *to);
2667void i915_vma_move_to_active(struct i915_vma *vma,
2668			     struct intel_engine_cs *ring);
2669int i915_gem_dumb_create(struct drm_file *file_priv,
2670			 struct drm_device *dev,
2671			 struct drm_mode_create_dumb *args);
2672int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
2673		      uint32_t handle, uint64_t *offset);
2674/**
2675 * Returns true if seq1 is later than seq2.
2676 */
2677static inline bool
2678i915_seqno_passed(uint32_t seq1, uint32_t seq2)
2679{
2680	return (int32_t)(seq1 - seq2) >= 0;
2681}
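
/*
 * The signed subtraction above makes the comparison robust against seqno
 * wrap-around. Worked example: with seq1 == 0x00000002 and
 * seq2 == 0xfffffffe, (int32_t)(seq1 - seq2) == 4 >= 0, so seq1 is correctly
 * treated as later even though it is numerically smaller.
 */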
2682
2683static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
2684					      bool lazy_coherency)
2685{
2686	u32 seqno;
2687
2688	BUG_ON(req == NULL);
2689
2690	seqno = req->ring->get_seqno(req->ring, lazy_coherency);
2691
2692	return i915_seqno_passed(seqno, req->seqno);
2693}
2694
2695int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
2696int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
2697int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
2698int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
2699
2700bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
2701void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
2702
2703struct drm_i915_gem_request *
2704i915_gem_find_active_request(struct intel_engine_cs *ring);
2705
2706bool i915_gem_retire_requests(struct drm_device *dev);
2707void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
2708int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
2709				      bool interruptible);
2710int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
2711
2712static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
2713{
2714	return unlikely(atomic_read(&error->reset_counter)
2715			& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
2716}
2717
2718static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
2719{
2720	return atomic_read(&error->reset_counter) & I915_WEDGED;
2721}
2722
2723static inline u32 i915_reset_count(struct i915_gpu_error *error)
2724{
2725	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
2726}
2727
2728static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
2729{
2730	return dev_priv->gpu_error.stop_rings == 0 ||
2731		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
2732}
2733
2734static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
2735{
2736	return dev_priv->gpu_error.stop_rings == 0 ||
2737		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
2738}
2739
2740void i915_gem_reset(struct drm_device *dev);
2741bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
2742int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
2743int __must_check i915_gem_init(struct drm_device *dev);
2744int i915_gem_init_rings(struct drm_device *dev);
2745int __must_check i915_gem_init_hw(struct drm_device *dev);
2746int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
2747void i915_gem_init_swizzling(struct drm_device *dev);
2748void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
2749int __must_check i915_gpu_idle(struct drm_device *dev);
2750int __must_check i915_gem_suspend(struct drm_device *dev);
2751int __i915_add_request(struct intel_engine_cs *ring,
2752		       struct drm_file *file,
2753		       struct drm_i915_gem_object *batch_obj);
2754#define i915_add_request(ring) \
2755	__i915_add_request(ring, NULL, NULL)
2756int __i915_wait_request(struct drm_i915_gem_request *req,
2757			unsigned reset_counter,
2758			bool interruptible,
2759			s64 *timeout,
2760			struct drm_i915_file_private *file_priv);
2761int __must_check i915_wait_request(struct drm_i915_gem_request *req);
2762int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
2763int __must_check
2764i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
2765				  bool write);
2766int __must_check
2767i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
2768int __must_check
2769i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2770				     u32 alignment,
2771				     struct intel_engine_cs *pipelined,
2772				     const struct i915_ggtt_view *view);
2773void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
2774					      const struct i915_ggtt_view *view);
2775int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
2776				int align);
2777int i915_gem_open(struct drm_device *dev, struct drm_file *file);
2778void i915_gem_release(struct drm_device *dev, struct drm_file *file);
2779
2780uint32_t
2781i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
2782uint32_t
2783i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
2784			    int tiling_mode, bool fenced);
2785
2786int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2787				    enum i915_cache_level cache_level);
2788
2789struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
2790				struct dma_buf *dma_buf);
2791
2792struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
2793				struct drm_gem_object *gem_obj, int flags);
2794
2795void i915_gem_restore_fences(struct drm_device *dev);
2796
2797unsigned long
2798i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
2799			      const struct i915_ggtt_view *view);
2800unsigned long
2801i915_gem_obj_offset(struct drm_i915_gem_object *o,
2802		    struct i915_address_space *vm);
2803static inline unsigned long
2804i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
2805{
2806	return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
2807}
2808
2809bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
2810bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
2811				  const struct i915_ggtt_view *view);
2812bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
2813			struct i915_address_space *vm);
2814
2815unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
2816				struct i915_address_space *vm);
2817struct i915_vma *
2818i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
2819		    struct i915_address_space *vm);
2820struct i915_vma *
2821i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
2822			  const struct i915_ggtt_view *view);
2823
2824struct i915_vma *
2825i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
2826				  struct i915_address_space *vm);
2827struct i915_vma *
2828i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
2829				       const struct i915_ggtt_view *view);
2830
2831static inline struct i915_vma *
2832i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
2833{
2834	return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
2835}
2836bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
2837
2838/* Some GGTT VM helpers */
2839#define i915_obj_to_ggtt(obj) \
2840	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
2841static inline bool i915_is_ggtt(struct i915_address_space *vm)
2842{
2843	struct i915_address_space *ggtt =
2844		&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
2845	return vm == ggtt;
2846}
2847
2848static inline struct i915_hw_ppgtt *
2849i915_vm_to_ppgtt(struct i915_address_space *vm)
2850{
2851	WARN_ON(i915_is_ggtt(vm));
2852
2853	return container_of(vm, struct i915_hw_ppgtt, base);
2854}
2855
2856
2857static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
2858{
2859	return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
2860}
2861
2862static inline unsigned long
2863i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
2864{
2865	return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
2866}
2867
2868static inline int __must_check
2869i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
2870		      uint32_t alignment,
2871		      unsigned flags)
2872{
2873	return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
2874				   alignment, flags | PIN_GLOBAL);
2875}
2876
2877static inline int
2878i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2879{
2880	return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
2881}
2882
2883void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
2884				     const struct i915_ggtt_view *view);
2885static inline void
2886i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
2887{
2888	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
2889}
2890
2891/* i915_gem_context.c */
2892int __must_check i915_gem_context_init(struct drm_device *dev);
2893void i915_gem_context_fini(struct drm_device *dev);
2894void i915_gem_context_reset(struct drm_device *dev);
2895int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
2896int i915_gem_context_enable(struct drm_i915_private *dev_priv);
2897void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
2898int i915_switch_context(struct intel_engine_cs *ring,
2899			struct intel_context *to);
2900struct intel_context *
2901i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
2902void i915_gem_context_free(struct kref *ctx_ref);
2903struct drm_i915_gem_object *
2904i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
2905static inline void i915_gem_context_reference(struct intel_context *ctx)
2906{
2907	kref_get(&ctx->ref);
2908}
2909
2910static inline void i915_gem_context_unreference(struct intel_context *ctx)
2911{
2912	kref_put(&ctx->ref, i915_gem_context_free);
2913}
2914
2915static inline bool i915_gem_context_is_default(const struct intel_context *c)
2916{
2917	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
2918}
2919
2920int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2921				  struct drm_file *file);
2922int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2923				   struct drm_file *file);
2924int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2925				    struct drm_file *file_priv);
2926int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2927				    struct drm_file *file_priv);
2928
2929/* i915_gem_evict.c */
2930int __must_check i915_gem_evict_something(struct drm_device *dev,
2931					  struct i915_address_space *vm,
2932					  int min_size,
2933					  unsigned alignment,
2934					  unsigned cache_level,
2935					  unsigned long start,
2936					  unsigned long end,
2937					  unsigned flags);
2938int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
2939int i915_gem_evict_everything(struct drm_device *dev);
2940
2941/* belongs in i915_gem_gtt.h */
2942static inline void i915_gem_chipset_flush(struct drm_device *dev)
2943{
2944	if (INTEL_INFO(dev)->gen < 6)
2945		intel_gtt_chipset_flush();
2946}
2947
2948/* i915_gem_stolen.c */
2949int i915_gem_init_stolen(struct drm_device *dev);
2950int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
2951void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
2952void i915_gem_cleanup_stolen(struct drm_device *dev);
2953struct drm_i915_gem_object *
2954i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
2955struct drm_i915_gem_object *
2956i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
2957					       u32 stolen_offset,
2958					       u32 gtt_offset,
2959					       u32 size);
2960
2961/* i915_gem_shrinker.c */
2962unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
2963			      long target,
2964			      unsigned flags);
2965#define I915_SHRINK_PURGEABLE 0x1
2966#define I915_SHRINK_UNBOUND 0x2
2967#define I915_SHRINK_BOUND 0x4
2968unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
2969void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
2970
2971
2972/* i915_gem_tiling.c */
2973static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
2974{
2975	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2976
2977	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
2978		obj->tiling_mode != I915_TILING_NONE;
2979}
2980
2981void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
2982void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
2983void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
2984
2985/* i915_gem_debug.c */
2986#if WATCH_LISTS
2987int i915_verify_lists(struct drm_device *dev);
2988#else
2989#define i915_verify_lists(dev) 0
2990#endif
2991
2992/* i915_debugfs.c */
2993int i915_debugfs_init(struct drm_minor *minor);
2994void i915_debugfs_cleanup(struct drm_minor *minor);
2995#ifdef CONFIG_DEBUG_FS
2996void intel_display_crc_init(struct drm_device *dev);
2997#else
2998static inline void intel_display_crc_init(struct drm_device *dev) {}
2999#endif
3000
3001/* i915_gpu_error.c */
3002__printf(2, 3)
3003void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
3004int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
3005			    const struct i915_error_state_file_priv *error);
3006int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
3007			      struct drm_i915_private *i915,
3008			      size_t count, loff_t pos);
3009static inline void i915_error_state_buf_release(
3010	struct drm_i915_error_state_buf *eb)
3011{
3012	kfree(eb->buf);
3013}
3014void i915_capture_error_state(struct drm_device *dev, bool wedge,
3015			      const char *error_msg);
3016void i915_error_state_get(struct drm_device *dev,
3017			  struct i915_error_state_file_priv *error_priv);
3018void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
3019void i915_destroy_error_state(struct drm_device *dev);
3020
3021void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
3022const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
3023
3024/* i915_gem_batch_pool.c */
3025void i915_gem_batch_pool_init(struct drm_device *dev,
3026			      struct i915_gem_batch_pool *pool);
3027void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
3028struct drm_i915_gem_object*
3029i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
3030
3031/* i915_cmd_parser.c */
3032int i915_cmd_parser_get_version(void);
3033int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
3034void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
3035bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
3036int i915_parse_cmds(struct intel_engine_cs *ring,
3037		    struct drm_i915_gem_object *batch_obj,
3038		    struct drm_i915_gem_object *shadow_batch_obj,
3039		    u32 batch_start_offset,
3040		    u32 batch_len,
3041		    bool is_master);
3042
3043/* i915_suspend.c */
3044extern int i915_save_state(struct drm_device *dev);
3045extern int i915_restore_state(struct drm_device *dev);
3046
3047/* i915_sysfs.c */
3048void i915_setup_sysfs(struct drm_device *dev_priv);
3049void i915_teardown_sysfs(struct drm_device *dev_priv);
3050
3051/* intel_i2c.c */
3052extern int intel_setup_gmbus(struct drm_device *dev);
3053extern void intel_teardown_gmbus(struct drm_device *dev);
3054static inline bool intel_gmbus_is_port_valid(unsigned port)
3055{
3056	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
3057}
3058
3059extern struct i2c_adapter *intel_gmbus_get_adapter(
3060		struct drm_i915_private *dev_priv, unsigned port);
3061extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
3062extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
3063static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
3064{
3065	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
3066}
3067extern void intel_i2c_reset(struct drm_device *dev);
3068
3069/* intel_opregion.c */
3070#ifdef CONFIG_ACPI
3071extern int intel_opregion_setup(struct drm_device *dev);
3072extern void intel_opregion_init(struct drm_device *dev);
3073extern void intel_opregion_fini(struct drm_device *dev);
3074extern void intel_opregion_asle_intr(struct drm_device *dev);
3075extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
3076					 bool enable);
3077extern int intel_opregion_notify_adapter(struct drm_device *dev,
3078					 pci_power_t state);
3079#else
3080static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
3081static inline void intel_opregion_init(struct drm_device *dev) { return; }
3082static inline void intel_opregion_fini(struct drm_device *dev) { return; }
3083static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
3084static inline int
3085intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
3086{
3087	return 0;
3088}
3089static inline int
3090intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
3091{
3092	return 0;
3093}
3094#endif
3095
3096/* intel_acpi.c */
3097#ifdef CONFIG_ACPI
3098extern void intel_register_dsm_handler(void);
3099extern void intel_unregister_dsm_handler(void);
3100#else
3101static inline void intel_register_dsm_handler(void) { return; }
3102static inline void intel_unregister_dsm_handler(void) { return; }
3103#endif /* CONFIG_ACPI */
3104
3105/* modesetting */
3106extern void intel_modeset_init_hw(struct drm_device *dev);
3107extern void intel_modeset_init(struct drm_device *dev);
3108extern void intel_modeset_gem_init(struct drm_device *dev);
3109extern void intel_modeset_cleanup(struct drm_device *dev);
3110extern void intel_connector_unregister(struct intel_connector *);
3111extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
3112extern void intel_modeset_setup_hw_state(struct drm_device *dev,
3113					 bool force_restore);
3114extern void i915_redisable_vga(struct drm_device *dev);
3115extern void i915_redisable_vga_power_on(struct drm_device *dev);
3116extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
3117extern void intel_init_pch_refclk(struct drm_device *dev);
3118extern void intel_set_rps(struct drm_device *dev, u8 val);
3119extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
3120				  bool enable);
3121extern void intel_detect_pch(struct drm_device *dev);
3122extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
3123extern int intel_enable_rc6(const struct drm_device *dev);
3124
3125extern bool i915_semaphore_is_enabled(struct drm_device *dev);
3126int i915_reg_read_ioctl(struct drm_device *dev, void *data,
3127			struct drm_file *file);
3128int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
3129			       struct drm_file *file);
3130
3131/* overlay */
3132extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
3133extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
3134					    struct intel_overlay_error_state *error);
3135
3136extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
3137extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
3138					    struct drm_device *dev,
3139					    struct intel_display_error_state *error);
3140
3141int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
3142int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
3143
3144/* intel_sideband.c */
3145u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
3146void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
3147u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
3148u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
3149void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3150u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
3151void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3152u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
3153void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3154u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
3155void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3156u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
3157void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3158u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
3159void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
3160u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
3161		   enum intel_sbi_destination destination);
3162void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
3163		     enum intel_sbi_destination destination);
3164u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
3165void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3166
3167int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
3168int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
3169
3170#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
3171#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
3172
3173#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
3174#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
3175#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
3176#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
3177
3178#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
3179#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
3180#define I915_READ_NOTRACE(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
3181#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
3182
3183/* Be very careful with read/write 64-bit values. On 32-bit machines, they
3184 * will be implemented using 2 32-bit writes in an arbitrary order with
3185 * an arbitrary delay between them. This can cause the hardware to
3186 * act upon the intermediate value, possibly leading to corruption and
3187 * machine death. You have been warned.
3188 */
3189#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
3190#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
3191
3192#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
3193	u32 upper, lower, old_upper, loop = 0;				\
3194	upper = I915_READ(upper_reg);					\
3195	do {								\
3196		old_upper = upper;					\
3197		lower = I915_READ(lower_reg);				\
3198		upper = I915_READ(upper_reg);				\
3199	} while (upper != old_upper && loop++ < 2);			\
3200	(u64)upper << 32 | lower; })
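
/*
 * I915_READ64_2x32() is the safe way to sample a 64-bit counter exposed as
 * two 32-bit halves: it re-reads the upper half (with a couple of retries)
 * until it is stable across the lower-half read, so a carry between the two
 * accesses cannot yield a torn value. Sketch of a call, with hypothetical
 * lower/upper register names:
 *
 *	u64 count = I915_READ64_2x32(FOO_CNT_LOW, FOO_CNT_HIGH);
 */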
3201
3202#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
3203#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
3204
3205/* "Broadcast RGB" property */
3206#define INTEL_BROADCAST_RGB_AUTO 0
3207#define INTEL_BROADCAST_RGB_FULL 1
3208#define INTEL_BROADCAST_RGB_LIMITED 2
3209
3210static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
3211{
3212	if (IS_VALLEYVIEW(dev))
3213		return VLV_VGACNTRL;
3214	else if (INTEL_INFO(dev)->gen >= 5)
3215		return CPU_VGACNTRL;
3216	else
3217		return VGACNTRL;
3218}
3219
3220static inline void __user *to_user_ptr(u64 address)
3221{
3222	return (void __user *)(uintptr_t)address;
3223}
3224
3225static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
3226{
3227	unsigned long j = msecs_to_jiffies(m);
3228
3229	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
3230}
3231
3232static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
3233{
	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
3235}
3236
3237static inline unsigned long
3238timespec_to_jiffies_timeout(const struct timespec *value)
3239{
3240	unsigned long j = timespec_to_jiffies(value);
3241
3242	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
3243}
3244
3245/*
3246 * If you need to wait X milliseconds between events A and B, but event B
3247 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
3248 * when event A happened, then just before event B you call this function and
3249 * pass the timestamp as the first argument, and X as the second argument.
3250 */
3251static inline void
3252wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
3253{
3254	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
3255
3256	/*
3257	 * Don't re-read the value of "jiffies" every time since it may change
3258	 * behind our back and break the math.
3259	 */
3260	tmp_jiffies = jiffies;
3261	target_jiffies = timestamp_jiffies +
3262			 msecs_to_jiffies_timeout(to_wait_ms);
3263
3264	if (time_after(target_jiffies, tmp_jiffies)) {
3265		remaining_jiffies = target_jiffies - tmp_jiffies;
3266		while (remaining_jiffies)
3267			remaining_jiffies =
3268			    schedule_timeout_uninterruptible(remaining_jiffies);
3269	}
3270}
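
/*
 * Example of the pattern described above (a sketch with a made-up 10ms
 * requirement between events A and B):
 *
 *	unsigned long ts = jiffies;	... right after event A ...
 *	... unrelated work ...
 *	wait_remaining_ms_from_jiffies(ts, 10);
 *	... event B may now proceed ...
 */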
3271
3272static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
3273				      struct drm_i915_gem_request *req)
3274{
3275	if (ring->trace_irq_req == NULL && ring->irq_get(ring))
3276		i915_gem_request_assign(&ring->trace_irq_req, req);
3277}
3278
3279#endif
3280