diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index f50309081ac7..e59480db9ee0 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -91,3 +91,12 @@ transferred to 'device' domain. This attribute can be also used for
 dma_unmap_{single,page,sg} functions family to force buffer to stay in
 device domain after releasing a mapping for it. Use this attribute with
 care!
+
+DMA_ATTR_FORCE_CONTIGUOUS
+-------------------------
+
+By default the DMA-mapping subsystem is allowed to assemble the buffer
+allocated by the dma_alloc_attrs() function from individual pages, as
+long as it can be mapped as a contiguous chunk into the device's DMA
+address space. By specifying this attribute the allocated buffer is
+forced to be contiguous in physical memory as well.
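+
+A minimal sketch of requesting such a buffer, assuming a valid struct
+device pointer in 'dev' and the buffer size in 'size', could look like:
+
+	DEFINE_DMA_ATTRS(attrs);
+	dma_addr_t dma_handle;
+	void *vaddr;
+
+	dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);
+	vaddr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, &attrs);
+
+The same attrs must also be passed to dma_free_attrs() when the buffer
+is released.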
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index b0300529ab13..4ee2304f82f9 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -1141,23 +1141,13 @@ int max_width, max_height;</synopsis>
             the <methodname>page_flip</methodname> operation will be called with a
             non-NULL <parameter>event</parameter> argument pointing to a
             <structname>drm_pending_vblank_event</structname> instance. Upon page
-            flip completion the driver must fill the
-            <parameter>event</parameter>::<structfield>event</structfield>
-            <structfield>sequence</structfield>, <structfield>tv_sec</structfield>
-            and <structfield>tv_usec</structfield> fields with the associated
-            vertical blanking count and timestamp, add the event to the
-            <parameter>drm_file</parameter> list of events to be signaled, and wake
-            up any waiting process. This can be performed with
+            flip completion the driver must call <methodname>drm_send_vblank_event</methodname>
+            to fill in the event and wake up any waiting processes.
+            This can be performed with
             <programlisting><![CDATA[
-            struct timeval now;
-
-            event->event.sequence = drm_vblank_count_and_time(..., &now);
-            event->event.tv_sec = now.tv_sec;
-            event->event.tv_usec = now.tv_usec;
-
             spin_lock_irqsave(&dev->event_lock, flags);
-            list_add_tail(&event->base.link, &event->base.file_priv->event_list);
-            wake_up_interruptible(&event->base.file_priv->event_wait);
+            ...
+            drm_send_vblank_event(dev, pipe, event);
             spin_unlock_irqrestore(&dev->event_lock, flags);
             ]]></programlisting>
           </para>
@@ -1621,10 +1611,10 @@ void intel_crt_init(struct drm_device *dev)
     </sect2>
   </sect1>
 
-  <!-- Internals: mid-layer helper functions -->
+  <!-- Internals: kms helper functions -->
 
   <sect1>
-    <title>Mid-layer Helper Functions</title>
+    <title>Mode Setting Helper Functions</title>
     <para>
       The CRTC, encoder and connector functions provided by the drivers
       implement the DRM API. They're called by the DRM core and ioctl handlers
@@ -2106,6 +2096,21 @@ void intel_crt_init(struct drm_device *dev)
         </listitem>
       </itemizedlist>
     </sect2>
+    <sect2>
+      <title>Modeset Helper Functions Reference</title>
+!Edrivers/gpu/drm/drm_crtc_helper.c
+    </sect2>
+    <sect2>
+      <title>fbdev Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_fb_helper.c fbdev helpers
+!Edrivers/gpu/drm/drm_fb_helper.c
+    </sect2>
+    <sect2>
+      <title>Display Port Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_dp_helper.c dp helpers
+!Iinclude/drm/drm_dp_helper.h
+!Edrivers/gpu/drm/drm_dp_helper.c
+    </sect2>
   </sect1>
 
   <!-- Internals: vertical blanking -->
diff --git a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
new file mode 100644
index 000000000000..b4fa934ae3a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -0,0 +1,191 @@
+NVIDIA Tegra host1x
+
+Required properties:
+- compatible: "nvidia,tegra<chip>-host1x"
+- reg: Physical base address and length of the controller's registers.
+- interrupts: The interrupt outputs from the controller.
+- #address-cells: The number of cells used to represent physical base addresses
+  in the host1x address space. Should be 1.
+- #size-cells: The number of cells used to represent the size of an address
+  range in the host1x address space. Should be 1.
+- ranges: The mapping of the host1x address space to the CPU address space.
+
+The host1x top-level node defines a number of children, each representing one
+of the following host1x client modules:
+
+- mpe: video encoder
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-mpe"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+- vi: video input
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-vi"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+- epp: encoder pre-processor
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-epp"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+- isp: image signal processor
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-isp"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+- gr2d: 2D graphics engine
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-gr2d"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+- gr3d: 3D graphics engine
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-gr3d"
+  - reg: Physical base address and length of the controller's registers.
+
+- dc: display controller
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-dc"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+  Each display controller node has a child node, named "rgb", that represents
+  the RGB output associated with the controller. It can take the following
+  optional properties:
+  - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+  - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
+  - nvidia,edid: supplies a binary EDID blob
+
+- hdmi: High Definition Multimedia Interface
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-hdmi"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+  - vdd-supply: regulator for supply voltage
+  - pll-supply: regulator for PLL
+
+  Optional properties:
+  - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+  - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
+  - nvidia,edid: supplies a binary EDID blob
+
+- tvo: TV encoder output
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-tvo"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+- dsi: display serial interface
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-dsi"
+  - reg: Physical base address and length of the controller's registers.
+
+Example:
+
+/ {
+	...
+
+	host1x {
+		compatible = "nvidia,tegra20-host1x", "simple-bus";
+		reg = <0x50000000 0x00024000>;
+		interrupts = <0 65 0x04   /* mpcore syncpt */
+			      0 67 0x04>; /* mpcore general */
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		ranges = <0x54000000 0x54000000 0x04000000>;
+
+		mpe {
+			compatible = "nvidia,tegra20-mpe";
+			reg = <0x54040000 0x00040000>;
+			interrupts = <0 68 0x04>;
+		};
+
+		vi {
+			compatible = "nvidia,tegra20-vi";
+			reg = <0x54080000 0x00040000>;
+			interrupts = <0 69 0x04>;
+		};
+
+		epp {
+			compatible = "nvidia,tegra20-epp";
+			reg = <0x540c0000 0x00040000>;
+			interrupts = <0 70 0x04>;
+		};
+
+		isp {
+			compatible = "nvidia,tegra20-isp";
+			reg = <0x54100000 0x00040000>;
+			interrupts = <0 71 0x04>;
+		};
+
+		gr2d {
+			compatible = "nvidia,tegra20-gr2d";
+			reg = <0x54140000 0x00040000>;
+			interrupts = <0 72 0x04>;
+		};
+
+		gr3d {
+			compatible = "nvidia,tegra20-gr3d";
+			reg = <0x54180000 0x00040000>;
+		};
+
+		dc@54200000 {
+			compatible = "nvidia,tegra20-dc";
+			reg = <0x54200000 0x00040000>;
+			interrupts = <0 73 0x04>;
+
+			rgb {
+				status = "disabled";
+			};
+		};
+
+		dc@54240000 {
+			compatible = "nvidia,tegra20-dc";
+			reg = <0x54240000 0x00040000>;
+			interrupts = <0 74 0x04>;
+
+			rgb {
+				status = "disabled";
+			};
+		};
+
+		hdmi {
+			compatible = "nvidia,tegra20-hdmi";
+			reg = <0x54280000 0x00040000>;
+			interrupts = <0 75 0x04>;
+			status = "disabled";
+		};
+
+		tvo {
+			compatible = "nvidia,tegra20-tvo";
+			reg = <0x542c0000 0x00040000>;
+			interrupts = <0 76 0x04>;
+			status = "disabled";
+		};
+
+		dsi {
+			compatible = "nvidia,tegra20-dsi";
+			reg = <0x54300000 0x00040000>;
+			status = "disabled";
+		};
+	};
+
+	...
+};
diff --git a/Documentation/kref.txt b/Documentation/kref.txt
index 48ba715d5a63..ddf85a5dde0c 100644
--- a/Documentation/kref.txt
+++ b/Documentation/kref.txt
@@ -213,3 +213,91 @@ presentation on krefs, which can be found at:
 and:
   http://www.kroah.com/linux/talks/ols_2004_kref_talk/
 
+
+The above example could also be optimized using kref_get_unless_zero() in
+the following way:
+
+static struct my_data *get_entry()
+{
+	struct my_data *entry = NULL;
+	mutex_lock(&mutex);
+	if (!list_empty(&q)) {
+		entry = container_of(q.next, struct my_data, link);
+		if (!kref_get_unless_zero(&entry->refcount))
+			entry = NULL;
+	}
+	mutex_unlock(&mutex);
+	return entry;
+}
+
+static void release_entry(struct kref *ref)
+{
+	struct my_data *entry = container_of(ref, struct my_data, refcount);
+
+	mutex_lock(&mutex);
+	list_del(&entry->link);
+	mutex_unlock(&mutex);
+	kfree(entry);
+}
+
+static void put_entry(struct my_data *entry)
+{
+	kref_put(&entry->refcount, release_entry);
+}
+
+This makes it possible to remove the mutex lock around kref_put() in
+put_entry(). However, it is important that kref_get_unless_zero is enclosed
+in the same critical section that finds the entry in the lookup table;
+otherwise kref_get_unless_zero may reference already freed memory.
+Note that it is illegal to use kref_get_unless_zero without checking its
+return value. If you are sure (by already having a valid pointer) that
+kref_get_unless_zero() will return true, then use kref_get() instead.
+
+The function kref_get_unless_zero also makes it possible to use RCU
+locking for lookups in the above example:
+
+struct my_data
+{
+	struct rcu_head rhead;
+	.
+	struct kref refcount;
+	.
+	.
+};
+
+static struct my_data *get_entry_rcu()
+{
+	struct my_data *entry = NULL;
+	rcu_read_lock();
+	if (!list_empty(&q)) {
+		entry = container_of(q.next, struct my_data, link);
+		if (!kref_get_unless_zero(&entry->refcount))
+			entry = NULL;
+	}
+	rcu_read_unlock();
+	return entry;
+}
+
+static void release_entry_rcu(struct kref *ref)
+{
+	struct my_data *entry = container_of(ref, struct my_data, refcount);
+
+	mutex_lock(&mutex);
+	list_del_rcu(&entry->link);
+	mutex_unlock(&mutex);
+	kfree_rcu(entry, rhead);
+}
+
+static void put_entry(struct my_data *entry)
+{
+	kref_put(&entry->refcount, release_entry_rcu);
+}
+
+But note that the struct kref member needs to remain in valid memory for an
+RCU grace period after release_entry_rcu was called. That can be accomplished
+by using kfree_rcu(entry, rhead) as done above, or by calling synchronize_rcu()
+before using kfree, but note that synchronize_rcu() may sleep for a
+substantial amount of time.
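+
+A hypothetical caller of the RCU variant then pairs the lookup with a put
+just as before:
+
+	struct my_data *entry = get_entry_rcu();
+
+	if (entry) {
+		/* the reference keeps entry alive until put_entry() */
+		put_entry(entry);
+	}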
+
+
+Thomas Hellstrom <thellstrom@vmware.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index d9c31b906ac9..6892b26025ba 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2549,6 +2549,15 @@ S:	Supported
 F:	drivers/gpu/drm/exynos
 F:	include/drm/exynos*
 
+DRM DRIVERS FOR NVIDIA TEGRA
+M:	Thierry Reding <thierry.reding@avionic-design.de>
+L:	dri-devel@lists.freedesktop.org
+L:	linux-tegra@vger.kernel.org
+T:	git git://gitorious.org/thierryreding/linux.git
+S:	Maintained
+F:	drivers/gpu/drm/tegra/
+F:	Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+
 DSCC4 DRIVER
 M:	Francois Romieu <romieu@fr.zoreil.com>
 L:	netdev@vger.kernel.org
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 5383bc018571..6b2fb87c8698 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1034,7 +1034,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
-static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+					  gfp_t gfp, struct dma_attrs *attrs)
 {
 	struct page **pages;
 	int count = size >> PAGE_SHIFT;
@@ -1048,6 +1049,23 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
 	if (!pages)
 		return NULL;
 
+	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+		unsigned long order = get_order(size);
+		struct page *page;
+
+		page = dma_alloc_from_contiguous(dev, count, order);
+		if (!page)
+			goto error;
+
+		__dma_clear_buffer(page, size);
+
+		for (i = 0; i < count; i++)
+			pages[i] = page + i;
+
+		return pages;
+	}
+
 	while (count) {
 		int j, order = __fls(count);
 
@@ -1081,14 +1099,21 @@ error:
 	return NULL;
 }
 
-static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+static int __iommu_free_buffer(struct device *dev, struct page **pages,
+			       size_t size, struct dma_attrs *attrs)
 {
 	int count = size >> PAGE_SHIFT;
 	int array_size = count * sizeof(struct page *);
 	int i;
-	for (i = 0; i < count; i++)
-		if (pages[i])
-			__free_pages(pages[i], 0);
+
+	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+		dma_release_from_contiguous(dev, pages[0], count);
+	} else {
+		for (i = 0; i < count; i++)
+			if (pages[i])
+				__free_pages(pages[i], 0);
+	}
+
 	if (array_size <= PAGE_SIZE)
 		kfree(pages);
 	else
@@ -1250,7 +1275,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	if (gfp & GFP_ATOMIC)
 		return __iommu_alloc_atomic(dev, size, handle);
 
-	pages = __iommu_alloc_buffer(dev, size, gfp);
+	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
 	if (!pages)
 		return NULL;
 
@@ -1271,7 +1296,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 err_mapping:
 	__iommu_remove_mapping(dev, *handle, size);
 err_buffer:
-	__iommu_free_buffer(dev, pages, size);
+	__iommu_free_buffer(dev, pages, size, attrs);
 	return NULL;
 }
 
@@ -1327,7 +1352,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	}
 
 	__iommu_remove_mapping(dev, handle, size);
-	__iommu_free_buffer(dev, pages, size);
+	__iommu_free_buffer(dev, pages, size, attrs);
 }
 
 static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 6ec0fff79bc2..1042c1b90376 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -62,12 +62,6 @@
 #define I810_PTE_LOCAL		0x00000002
 #define I810_PTE_VALID		0x00000001
 #define I830_PTE_SYSTEM_CACHED  0x00000006
-/* GT PTE cache control fields */
-#define GEN6_PTE_UNCACHED	0x00000002
-#define HSW_PTE_UNCACHED	0x00000000
-#define GEN6_PTE_LLC		0x00000004
-#define GEN6_PTE_LLC_MLC	0x00000006
-#define GEN6_PTE_GFDT		0x00000008
 
 #define I810_SMRAM_MISCC	0x70
 #define I810_GFX_MEM_WIN_SIZE	0x00010000
@@ -97,7 +91,6 @@
 #define G4x_GMCH_SIZE_VT_2M	(G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
 
 #define GFX_FLSH_CNTL		0x2170 /* 915+ */
-#define GFX_FLSH_CNTL_VLV	0x101008
 
 #define I810_DRAM_CTL		0x3000
 #define I810_DRAM_ROW_0		0x00000001
@@ -148,29 +141,6 @@
 #define INTEL_I7505_AGPCTRL	0x70
 #define INTEL_I7505_MCHCFG	0x50
 
-#define SNB_GMCH_CTRL	0x50
-#define SNB_GMCH_GMS_STOLEN_MASK	0xF8
-#define SNB_GMCH_GMS_STOLEN_32M		(1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M		(2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M		(3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M	(4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M	(5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M	(6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M	(7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M	(8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M	(9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M	(0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M	(0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M	(0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M	(0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M	(0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M	(0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M	(0x10 << 3)
-#define SNB_GTT_SIZE_0M			(0 << 8)
-#define SNB_GTT_SIZE_1M			(1 << 8)
-#define SNB_GTT_SIZE_2M			(2 << 8)
-#define SNB_GTT_SIZE_MASK		(3 << 8)
-
 /* pci devices ids */
 #define PCI_DEVICE_ID_INTEL_E7221_HB	0x2588
 #define PCI_DEVICE_ID_INTEL_E7221_IG	0x258a
@@ -219,66 +189,5 @@
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB	    0x0062
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB    0x006a
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG	    0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB		0x0100  /* Desktop */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG		0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG		0x0112
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG	0x0122
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB		0x0104  /* Mobile */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG	0x0106
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG	0x0116
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG	0x0126
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB		0x0108  /* Server */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG		0x010A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB		0x0150  /* Desktop */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG		0x0152
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG		0x0162
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB		0x0154  /* Mobile */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG		0x0156
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG		0x0166
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB		0x0158  /* Server */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG		0x015A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG		0x016A
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB		0x0F00 /* VLV1 */
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG		0x0F30
-#define PCI_DEVICE_ID_INTEL_HASWELL_HB			0x0400 /* Desktop */
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG		0x0402
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG		0x0412
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG	0x0422
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB		0x0404 /* Mobile */
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG		0x0406
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG		0x0416
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG	0x0426
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB		0x0408 /* Server */
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG		0x040a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG		0x041a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG	0x042a
-#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB		0x0c04
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG	0x0C02
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG	0x0C12
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG	0x0C22
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG	0x0C06
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG	0x0C16
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG	0x0C26
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG	0x0C0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG	0x0C1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG	0x0C2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG	0x0A02
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG	0x0A12
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG	0x0A22
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG	0x0A06
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG	0x0A16
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG	0x0A26
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG	0x0A0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG	0x0A1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG	0x0A2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG	0x0D12
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG	0x0D22
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG	0x0D32
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG	0x0D16
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG	0x0D26
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG	0x0D36
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG	0x0D1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG	0x0D2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG	0x0D3A
 
 #endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 38390f7c6ab6..dbd901e94ea6 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -367,62 +367,6 @@ static unsigned int intel_gtt_stolen_size(void)
 			stolen_size = 0;
 			break;
 		}
-	} else if (INTEL_GTT_GEN == 6) {
-		/*
-		 * SandyBridge has new memory control reg at 0x50.w
-		 */
-		u16 snb_gmch_ctl;
-		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
-		case SNB_GMCH_GMS_STOLEN_32M:
-			stolen_size = MB(32);
-			break;
-		case SNB_GMCH_GMS_STOLEN_64M:
-			stolen_size = MB(64);
-			break;
-		case SNB_GMCH_GMS_STOLEN_96M:
-			stolen_size = MB(96);
-			break;
-		case SNB_GMCH_GMS_STOLEN_128M:
-			stolen_size = MB(128);
-			break;
-		case SNB_GMCH_GMS_STOLEN_160M:
-			stolen_size = MB(160);
-			break;
-		case SNB_GMCH_GMS_STOLEN_192M:
-			stolen_size = MB(192);
-			break;
-		case SNB_GMCH_GMS_STOLEN_224M:
-			stolen_size = MB(224);
-			break;
-		case SNB_GMCH_GMS_STOLEN_256M:
-			stolen_size = MB(256);
-			break;
-		case SNB_GMCH_GMS_STOLEN_288M:
-			stolen_size = MB(288);
-			break;
-		case SNB_GMCH_GMS_STOLEN_320M:
-			stolen_size = MB(320);
-			break;
-		case SNB_GMCH_GMS_STOLEN_352M:
-			stolen_size = MB(352);
-			break;
-		case SNB_GMCH_GMS_STOLEN_384M:
-			stolen_size = MB(384);
-			break;
-		case SNB_GMCH_GMS_STOLEN_416M:
-			stolen_size = MB(416);
-			break;
-		case SNB_GMCH_GMS_STOLEN_448M:
-			stolen_size = MB(448);
-			break;
-		case SNB_GMCH_GMS_STOLEN_480M:
-			stolen_size = MB(480);
-			break;
-		case SNB_GMCH_GMS_STOLEN_512M:
-			stolen_size = MB(512);
-			break;
-		}
 	} else {
 		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
 		case I855_GMCH_GMS_STOLEN_1M:
@@ -556,29 +500,9 @@ static unsigned int i965_gtt_total_entries(void)
 
 static unsigned int intel_gtt_total_entries(void)
 {
-	int size;
-
 	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
 		return i965_gtt_total_entries();
-	else if (INTEL_GTT_GEN == 6) {
-		u16 snb_gmch_ctl;
-
-		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
-		default:
-		case SNB_GTT_SIZE_0M:
-			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
-			size = MB(0);
-			break;
-		case SNB_GTT_SIZE_1M:
-			size = MB(1);
-			break;
-		case SNB_GTT_SIZE_2M:
-			size = MB(2);
-			break;
-		}
-		return size/4;
-	} else {
+	else {
 		/* On previous hardware, the GTT size was just what was
 		 * required to map the aperture.
 		 */
@@ -778,9 +702,6 @@ bool intel_enable_gtt(void)
 {
 	u8 __iomem *reg;
 
-	if (INTEL_GTT_GEN >= 6)
-	    return true;
-
 	if (INTEL_GTT_GEN == 2) {
 		u16 gmch_ctrl;
 
@@ -1149,85 +1070,6 @@ static void i965_write_entry(dma_addr_t addr,
 	writel(addr | pte_flags, intel_private.gtt + entry);
 }
 
-static bool gen6_check_flags(unsigned int flags)
-{
-	return true;
-}
-
-static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
-				unsigned int flags)
-{
-	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
-	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
-	u32 pte_flags;
-
-	if (type_mask == AGP_USER_MEMORY)
-		pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
-	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
-		if (gfdt)
-			pte_flags |= GEN6_PTE_GFDT;
-	} else { /* set 'normal'/'cached' to LLC by default */
-		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
-		if (gfdt)
-			pte_flags |= GEN6_PTE_GFDT;
-	}
-
-	/* gen6 has bit11-4 for physical addr bit39-32 */
-	addr |= (addr >> 28) & 0xff0;
-	writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
-			     unsigned int flags)
-{
-	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
-	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
-	u32 pte_flags;
-
-	if (type_mask == AGP_USER_MEMORY)
-		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
-	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
-		if (gfdt)
-			pte_flags |= GEN6_PTE_GFDT;
-	} else { /* set 'normal'/'cached' to LLC by default */
-		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
-		if (gfdt)
-			pte_flags |= GEN6_PTE_GFDT;
-	}
-
-	/* gen6 has bit11-4 for physical addr bit39-32 */
-	addr |= (addr >> 28) & 0xff0;
-	writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
-				   unsigned int flags)
-{
-	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
-	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
-	u32 pte_flags;
-
-	if (type_mask == AGP_USER_MEMORY)
-		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
-	else {
-		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
-		if (gfdt)
-			pte_flags |= GEN6_PTE_GFDT;
-	}
-
-	/* gen6 has bit11-4 for physical addr bit39-32 */
-	addr |= (addr >> 28) & 0xff0;
-	writel(addr | pte_flags, intel_private.gtt + entry);
-
-	writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
-}
-
-static void gen6_cleanup(void)
-{
-}
-
 /* Certain Gen5 chipsets require require idling the GPU before
  * unmapping anything from the GTT when VT-d is enabled.
  */
@@ -1249,41 +1091,29 @@ static inline int needs_idle_maps(void)
 
 static int i9xx_setup(void)
 {
-	u32 reg_addr;
+	u32 reg_addr, gtt_addr;
 	int size = KB(512);
 
 	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
 
 	reg_addr &= 0xfff80000;
 
-	if (INTEL_GTT_GEN >= 7)
-		size = MB(2);
-
 	intel_private.registers = ioremap(reg_addr, size);
 	if (!intel_private.registers)
 		return -ENOMEM;
 
-	if (INTEL_GTT_GEN == 3) {
-		u32 gtt_addr;
-
+	switch (INTEL_GTT_GEN) {
+	case 3:
 		pci_read_config_dword(intel_private.pcidev,
 				      I915_PTEADDR, &gtt_addr);
 		intel_private.gtt_bus_addr = gtt_addr;
-	} else {
-		u32 gtt_offset;
-
-		switch (INTEL_GTT_GEN) {
-		case 5:
-		case 6:
-		case 7:
-			gtt_offset = MB(2);
-			break;
-		case 4:
-		default:
-			gtt_offset =  KB(512);
-			break;
-		}
-		intel_private.gtt_bus_addr = reg_addr + gtt_offset;
+		break;
+	case 5:
+		intel_private.gtt_bus_addr = reg_addr + MB(2);
+		break;
+	default:
+		intel_private.gtt_bus_addr = reg_addr + KB(512);
+		break;
 	}
 
 	if (needs_idle_maps())
@@ -1395,32 +1225,6 @@ static const struct intel_gtt_driver ironlake_gtt_driver = {
 	.check_flags = i830_check_flags,
 	.chipset_flush = i9xx_chipset_flush,
 };
-static const struct intel_gtt_driver sandybridge_gtt_driver = {
-	.gen = 6,
-	.setup = i9xx_setup,
-	.cleanup = gen6_cleanup,
-	.write_entry = gen6_write_entry,
-	.dma_mask_size = 40,
-	.check_flags = gen6_check_flags,
-	.chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver haswell_gtt_driver = {
-	.gen = 6,
-	.setup = i9xx_setup,
-	.cleanup = gen6_cleanup,
-	.write_entry = haswell_write_entry,
-	.dma_mask_size = 40,
-	.check_flags = gen6_check_flags,
-	.chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver valleyview_gtt_driver = {
-	.gen = 7,
-	.setup = i9xx_setup,
-	.cleanup = gen6_cleanup,
-	.write_entry = valleyview_write_entry,
-	.dma_mask_size = 40,
-	.check_flags = gen6_check_flags,
-};
 
 /* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
  * driver and gmch_driver must be non-null, and find_gmch will determine
@@ -1501,106 +1305,6 @@ static const struct intel_gtt_driver_description {
 	    "HD Graphics", &ironlake_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
 	    "HD Graphics", &ironlake_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
-	    "Sandybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
-	    "Sandybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
-	    "Sandybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
-	    "Sandybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
-	    "Sandybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
-	    "Sandybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
-	    "Sandybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
-	    "Ivybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
-	    "Ivybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
-	    "Ivybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
-	    "Ivybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
-	    "Ivybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
-	    "Ivybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
-	    "ValleyView", &valleyview_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
 	{ 0, NULL, NULL }
 };
 
@@ -1686,7 +1390,7 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 }
 EXPORT_SYMBOL(intel_gmch_probe);
 
-const struct intel_gtt *intel_gtt_get(void)
+struct intel_gtt *intel_gtt_get(void)
 {
 	return &intel_private.base;
 }
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 18321b68b880..983201b450f1 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -210,3 +210,5 @@ source "drivers/gpu/drm/mgag200/Kconfig"
 source "drivers/gpu/drm/cirrus/Kconfig"
 
 source "drivers/gpu/drm/shmobile/Kconfig"
+
+source "drivers/gpu/drm/tegra/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 2ff5cefe9ead..6f58c81cfcbc 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -8,7 +8,7 @@ drm-y       :=	drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
 		drm_context.o drm_dma.o \
 		drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
 		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
-		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+		drm_agpsupport.o drm_scatter.o drm_pci.o \
 		drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
 		drm_crtc.o drm_modes.o drm_edid.o \
 		drm_info.o drm_debugfs.o drm_encoder_slave.o \
@@ -16,10 +16,11 @@ drm-y       :=	drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
+drm-$(CONFIG_PCI) += ati_pcigart.o
 
 drm-usb-y   := drm_usb.o
 
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
+drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 
@@ -48,4 +49,5 @@ obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
 obj-$(CONFIG_DRM_AST) += ast/
 obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
+obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-y			+= i2c/
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 1a026ac2dfb4..3602731a6112 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -186,11 +186,11 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *
 
 static int ast_bo_move(struct ttm_buffer_object *bo,
 		       bool evict, bool interruptible,
-		       bool no_wait_reserve, bool no_wait_gpu,
+		       bool no_wait_gpu,
 		       struct ttm_mem_reg *new_mem)
 {
 	int r;
-	r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+	r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 	return r;
 }
 
@@ -356,7 +356,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
 
 	ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
 			  ttm_bo_type_device, &astbo->placement,
-			  align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+			  align >> PAGE_SHIFT, false, NULL, acc_size,
 			  NULL, ast_bo_ttm_destroy);
 	if (ret)
 		return ret;
@@ -383,7 +383,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
 	ast_ttm_placement(bo, pl_flag);
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret)
 		return ret;
 
@@ -406,7 +406,7 @@ int ast_bo_unpin(struct ast_bo *bo)
 
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret)
 		return ret;
 
@@ -431,7 +431,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
 
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret) {
 		DRM_ERROR("pushing to VRAM failed\n");
 		return ret;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 101e423c8991..dcd1a8c029eb 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -35,12 +35,15 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
 };
 
 
-static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
+static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
 {
 	struct apertures_struct *ap;
 	bool primary = false;
 
 	ap = alloc_apertures(1);
+	if (!ap)
+		return -ENOMEM;
+
 	ap->ranges[0].base = pci_resource_start(pdev, 0);
 	ap->ranges[0].size = pci_resource_len(pdev, 0);
 
@@ -49,12 +52,18 @@ static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
 #endif
 	remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
 	kfree(ap);
+
+	return 0;
 }
 
 static int __devinit
 cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-	cirrus_kick_out_firmware_fb(pdev);
+	int ret;
+
+	ret = cirrus_kick_out_firmware_fb(pdev);
+	if (ret)
+		return ret;
 
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index bc83f835c830..1413a26e4905 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -186,11 +186,11 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
 
 static int cirrus_bo_move(struct ttm_buffer_object *bo,
 		       bool evict, bool interruptible,
-		       bool no_wait_reserve, bool no_wait_gpu,
+		       bool no_wait_gpu,
 		       struct ttm_mem_reg *new_mem)
 {
 	int r;
-	r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+	r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 	return r;
 }
 
@@ -361,7 +361,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
 
 	ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
 			  ttm_bo_type_device, &cirrusbo->placement,
-			  align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+			  align >> PAGE_SHIFT, false, NULL, acc_size,
 			  NULL, cirrus_bo_ttm_destroy);
 	if (ret)
 		return ret;
@@ -388,7 +388,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
 	cirrus_ttm_placement(bo, pl_flag);
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret)
 		return ret;
 
@@ -411,7 +411,7 @@ int cirrus_bo_unpin(struct cirrus_bo *bo)
 
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret)
 		return ret;
 
@@ -436,7 +436,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
 
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret) {
 		DRM_ERROR("pushing to VRAM failed\n");
 		return ret;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index ef1b22144d37..f2d667b8bee2 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -470,10 +470,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 
-	if (crtc->gamma_store) {
-		kfree(crtc->gamma_store);
-		crtc->gamma_store = NULL;
-	}
+	kfree(crtc->gamma_store);
+	crtc->gamma_store = NULL;
 
 	drm_mode_object_put(dev, &crtc->base);
 	list_del(&crtc->head);
@@ -555,16 +553,17 @@ int drm_connector_init(struct drm_device *dev,
 	INIT_LIST_HEAD(&connector->probed_modes);
 	INIT_LIST_HEAD(&connector->modes);
 	connector->edid_blob_ptr = NULL;
+	connector->status = connector_status_unknown;
 
 	list_add_tail(&connector->head, &dev->mode_config.connector_list);
 	dev->mode_config.num_connector++;
 
 	if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      dev->mode_config.edid_property,
 					      0);
 
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				      dev->mode_config.dpms_property, 0);
 
  out:
@@ -2280,13 +2279,21 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
 
 	for (i = 0; i < num_planes; i++) {
 		unsigned int width = r->width / (i != 0 ? hsub : 1);
+		unsigned int height = r->height / (i != 0 ? vsub : 1);
+		unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
 
 		if (!r->handles[i]) {
 			DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
 			return -EINVAL;
 		}
 
-		if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) {
+		if ((uint64_t) width * cpp > UINT_MAX)
+			return -ERANGE;
+
+		if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
+			return -ERANGE;
+
+		if (r->pitches[i] < width * cpp) {
 			DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
 			return -EINVAL;
 		}
@@ -2323,6 +2330,11 @@ int drm_mode_addfb2(struct drm_device *dev,
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
 
+	if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+		DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+		return -EINVAL;
+	}
+
 	if ((config->min_width > r->width) || (r->width > config->max_width)) {
 		DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
 			  r->width, config->min_width, config->max_width);
@@ -2916,27 +2928,6 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
 }
 EXPORT_SYMBOL(drm_property_destroy);
 
-void drm_connector_attach_property(struct drm_connector *connector,
-			       struct drm_property *property, uint64_t init_val)
-{
-	drm_object_attach_property(&connector->base, property, init_val);
-}
-EXPORT_SYMBOL(drm_connector_attach_property);
-
-int drm_connector_property_set_value(struct drm_connector *connector,
-				  struct drm_property *property, uint64_t value)
-{
-	return drm_object_property_set_value(&connector->base, property, value);
-}
-EXPORT_SYMBOL(drm_connector_property_set_value);
-
-int drm_connector_property_get_value(struct drm_connector *connector,
-				  struct drm_property *property, uint64_t *val)
-{
-	return drm_object_property_get_value(&connector->base, property, val);
-}
-EXPORT_SYMBOL(drm_connector_property_get_value);
-
 void drm_object_attach_property(struct drm_mode_object *obj,
 				struct drm_property *property,
 				uint64_t init_val)
@@ -3173,15 +3164,17 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
 	/* Delete edid, when there is none. */
 	if (!edid) {
 		connector->edid_blob_ptr = NULL;
-		ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
+		ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
 		return ret;
 	}
 
 	size = EDID_LENGTH * (1 + edid->extensions);
 	connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
 							    size, edid);
+	if (!connector->edid_blob_ptr)
+		return -EINVAL;
 
-	ret = drm_connector_property_set_value(connector,
+	ret = drm_object_property_set_value(&connector->base,
 					       dev->mode_config.edid_property,
 					       connector->edid_blob_ptr->base.id);
 
@@ -3204,6 +3197,9 @@ static bool drm_property_change_is_valid(struct drm_property *property,
 		for (i = 0; i < property->num_values; i++)
 			valid_mask |= (1ULL << property->values[i]);
 		return !(value & ~valid_mask);
+	} else if (property->flags & DRM_MODE_PROP_BLOB) {
+		/* Only the driver knows */
+		return true;
 	} else {
 		int i;
 		for (i = 0; i < property->num_values; i++)
@@ -3245,7 +3241,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
 
 	/* store the property value if successful */
 	if (!ret)
-		drm_connector_property_set_value(connector, property, value);
+		drm_object_property_set_value(&connector->base, property, value);
 	return ret;
 }
 
@@ -3656,9 +3652,12 @@ void drm_mode_config_reset(struct drm_device *dev)
 		if (encoder->funcs->reset)
 			encoder->funcs->reset(encoder);
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		connector->status = connector_status_unknown;
+
 		if (connector->funcs->reset)
 			connector->funcs->reset(connector);
+	}
 }
 EXPORT_SYMBOL(drm_mode_config_reset);
 
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 1227adf74dbc..7b2d378b2576 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -39,6 +39,35 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_edid.h>
 
+/**
+ * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
+ * 						connector list
+ * @dev: drm device to operate on
+ *
+ * Some userspace presumes that the first connected connector is the main
+ * display, where it's supposed to display e.g. the login screen. For
+ * laptops, this should be the main panel. Use this function to sort all
+ * (eDP/LVDS) panels to the front of the connector list, instead of
+ * painstakingly trying to initialize them in the right order.
+ */
+void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
+{
+	struct drm_connector *connector, *tmp;
+	struct list_head panel_list;
+
+	INIT_LIST_HEAD(&panel_list);
+
+	list_for_each_entry_safe(connector, tmp,
+				 &dev->mode_config.connector_list, head) {
+		if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+			list_move_tail(&connector->head, &panel_list);
+	}
+
+	list_splice(&panel_list, &dev->mode_config.connector_list);
+}
+EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
+
 static bool drm_kms_helper_poll = true;
 module_param_named(poll, drm_kms_helper_poll, bool, 0600);
 
@@ -64,22 +93,21 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
 
 /**
  * drm_helper_probe_single_connector_modes - get complete set of display modes
- * @dev: DRM device
+ * @connector: connector to probe
  * @maxX: max width for modes
  * @maxY: max height for modes
  *
  * LOCKING:
  * Caller must hold mode config lock.
  *
- * Based on @dev's mode_config layout, scan all the connectors and try to detect
- * modes on them.  Modes will first be added to the connector's probed_modes
- * list, then culled (based on validity and the @maxX, @maxY parameters) and
- * put into the normal modes list.
+ * Based on the helper callbacks implemented by @connector, try to detect all
+ * valid modes.  Modes will first be added to the connector's probed_modes list,
+ * then culled (based on validity and the @maxX, @maxY parameters) and put into
+ * the normal modes list.
  *
- * Intended to be used either at bootup time or when major configuration
- * changes have occurred.
- *
- * FIXME: take into account monitor limits
+ * Intended to be used as a generic implementation of the ->probe() @connector
+ * callback for drivers that use the crtc helpers for output mode filtering and
+ * detection.
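+ *
+ * In practice a driver hooks this up as the connector's ->fill_modes
+ * callback (hypothetical snippet):
+ *
+ *	static const struct drm_connector_funcs my_connector_funcs = {
+ *		.fill_modes = drm_helper_probe_single_connector_modes,
+ *		...
+ *	};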
  *
  * RETURNS:
  * Number of modes found on @connector.
@@ -109,9 +137,14 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 			connector->funcs->force(connector);
 	} else {
 		connector->status = connector->funcs->detect(connector, true);
-		drm_kms_helper_poll_enable(dev);
 	}
 
+	/* Re-enable polling in case the global poll config changed. */
+	if (drm_kms_helper_poll != dev->mode_config.poll_running)
+		drm_kms_helper_poll_enable(dev);
+
+	dev->mode_config.poll_running = drm_kms_helper_poll;
+
 	if (connector->status == connector_status_disconnected) {
 		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
 			connector->base.id, drm_get_connector_name(connector));
@@ -325,17 +358,24 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
 }
 
 /**
- * drm_crtc_set_mode - set a mode
+ * drm_crtc_helper_set_mode - internal helper to set a mode
  * @crtc: CRTC to program
  * @mode: mode to use
- * @x: width of mode
- * @y: height of mode
+ * @x: horizontal offset into the surface
+ * @y: vertical offset into the surface
+ * @old_fb: old framebuffer, for cleanup
  *
  * LOCKING:
  * Caller must hold mode config lock.
  *
  * Try to set @mode on @crtc.  Give @crtc and its associated connectors a chance
- * to fixup or reject the mode prior to trying to set it.
+ * to fixup or reject the mode prior to trying to set it. This is an internal
+ * helper that drivers could e.g. use to update properties that require the
+ * entire output pipe to be disabled and re-enabled in a new configuration,
+ * for example to change whether audio is enabled on an HDMI link or to
+ * change panel fitter or dither attributes. It is also called by the
+ * drm_crtc_helper_set_config() helper function to drive the mode setting
+ * sequence.
  *
  * RETURNS:
  * True if the mode was set successfully, or false otherwise.
@@ -491,20 +531,19 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
 
 /**
  * drm_crtc_helper_set_config - set a new config from userspace
- * @crtc: CRTC to setup
- * @crtc_info: user provided configuration
- * @new_mode: new mode to set
- * @connector_set: set of connectors for the new config
- * @fb: new framebuffer
+ * @set: mode set configuration
  *
  * LOCKING:
  * Caller must hold mode config lock.
  *
- * Setup a new configuration, provided by the user in @crtc_info, and enable
- * it.
+ * Setup a new configuration, provided by the upper layers (either an ioctl call
+ * from userspace or internally e.g. from the fbdev support code) in @set, and
+ * enable it. This is the main helper function for drivers that implement
+ * kernel mode setting with the crtc helper functions and the assorted
+ * ->prepare(), ->modeset() and ->commit() helper callbacks.
  *
  * RETURNS:
- * Zero. (FIXME)
+ * Returns 0 on success, -ERRNO on failure.
  */
 int drm_crtc_helper_set_config(struct drm_mode_set *set)
 {
@@ -800,12 +839,14 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
 }
 
 /**
- * drm_helper_connector_dpms
- * @connector affected connector
- * @mode DPMS mode
+ * drm_helper_connector_dpms() - connector dpms helper implementation
+ * @connector: affected connector
+ * @mode: DPMS mode
  *
- * Calls the low-level connector DPMS function, then
- * calls appropriate encoder and crtc DPMS functions as well
+ * This is the main helper function provided by the crtc helper framework for
+ * implementing the DPMS connector attribute. It computes the new desired DPMS
+ * state for all encoders and crtcs in the output mesh and calls the ->dpms()
+ * callback provided by the driver appropriately.
  */
 void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
 {
@@ -918,6 +959,15 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_helper_resume_force_mode);
 
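+/**
+ * drm_kms_helper_hotplug_event() - fire off KMS hotplug events
+ * @dev: drm device to operate on
+ *
+ * This function fires off the uevent for userspace and also calls the
+ * output_poll_changed function, which is most commonly used to inform the
+ * fbdev emulation code of a changed output configuration.
+ */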
+void drm_kms_helper_hotplug_event(struct drm_device *dev)
+{
+	/* send a uevent + call fbdev */
+	drm_sysfs_hotplug_event(dev);
+	if (dev->mode_config.funcs->output_poll_changed)
+		dev->mode_config.funcs->output_poll_changed(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
+
 #define DRM_OUTPUT_POLL_PERIOD (10*HZ)
 static void output_poll_execute(struct work_struct *work)
 {
@@ -933,20 +983,22 @@ static void output_poll_execute(struct work_struct *work)
 	mutex_lock(&dev->mode_config.mutex);
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 
-		/* if this is HPD or polled don't check it -
-		   TV out for instance */
-		if (!connector->polled)
+		/* Ignore forced connectors. */
+		if (connector->force)
 			continue;
 
-		else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
-			repoll = true;
+		/* Ignore HPD capable connectors and connectors where we don't
+		 * want any hotplug detection at all for polling. */
+		if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
+			continue;
+
+		repoll = true;
 
 		old_status = connector->status;
 		/* if we are connected and don't want to poll for disconnect
 		   skip it */
 		if (old_status == connector_status_connected &&
-		    !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
-		    !(connector->polled & DRM_CONNECTOR_POLL_HPD))
+		    !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
 			continue;
 
 		connector->status = connector->funcs->detect(connector, false);
@@ -960,12 +1012,8 @@ static void output_poll_execute(struct work_struct *work)
 
 	mutex_unlock(&dev->mode_config.mutex);
 
-	if (changed) {
-		/* send a uevent + call fbdev */
-		drm_sysfs_hotplug_event(dev);
-		if (dev->mode_config.funcs->output_poll_changed)
-			dev->mode_config.funcs->output_poll_changed(dev);
-	}
+	if (changed)
+		drm_kms_helper_hotplug_event(dev);
 
 	if (repoll)
 		schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
@@ -988,7 +1036,8 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
 		return;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (connector->polled)
+		if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+					 DRM_CONNECTOR_POLL_DISCONNECT))
 			poll = true;
 	}
 
@@ -1014,12 +1063,34 @@ EXPORT_SYMBOL(drm_kms_helper_poll_fini);
 
 void drm_helper_hpd_irq_event(struct drm_device *dev)
 {
+	struct drm_connector *connector;
+	enum drm_connector_status old_status;
+	bool changed = false;
+
 	if (!dev->mode_config.poll_enabled)
 		return;
 
-	/* kill timer and schedule immediate execution, this doesn't block */
-	cancel_delayed_work(&dev->mode_config.output_poll_work);
-	if (drm_kms_helper_poll)
-		schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
+	mutex_lock(&dev->mode_config.mutex);
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+		/* Only handle HPD capable connectors. */
+		if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
+			continue;
+
+		old_status = connector->status;
+
+		connector->status = connector->funcs->detect(connector, false);
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+			      connector->base.id,
+			      drm_get_connector_name(connector),
+			      old_status, connector->status);
+		if (old_status != connector->status)
+			changed = true;
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+	if (changed)
+		drm_kms_helper_hotplug_event(dev);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
diff --git a/drivers/gpu/drm/drm_dp_i2c_helper.c b/drivers/gpu/drm/drm_dp_helper.c
similarity index 58%
rename from drivers/gpu/drm/drm_dp_i2c_helper.c
rename to drivers/gpu/drm/drm_dp_helper.c
index 7f246f212457..89e196627160 100644
--- a/drivers/gpu/drm/drm_dp_i2c_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -30,6 +30,15 @@
 #include <drm/drm_dp_helper.h>
 #include <drm/drmP.h>
 
+/**
+ * DOC: dp helpers
+ *
+ * These functions contain some common logic and helpers at various abstraction
+ * levels to deal with Display Port sink devices and related things like DP aux
+ * channel transfers, EDID reading over DP aux channels, decoding certain DPCD
+ * blocks, ...
+ */
+
 /* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
 static int
 i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
@@ -37,7 +46,7 @@ i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
 {
 	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
 	int ret;
-	
+
 	ret = (*algo_data->aux_ch)(adapter, mode,
 				   write_byte, read_byte);
 	return ret;
@@ -182,7 +191,6 @@ i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
 {
 	(void) i2c_algo_dp_aux_address(adapter, 0, false);
 	(void) i2c_algo_dp_aux_stop(adapter, false);
-					   
 }
 
 static int
@@ -194,11 +202,23 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
 	return 0;
 }
 
+/**
+ * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
+ * @adapter: i2c adapter to register
+ *
+ * This registers an i2c adapter that uses the dp aux channel as its underlying
+ * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
+ * and store it in the algo_data member of the @adapter argument. This will be
+ * used by the i2c over dp aux algorithm to drive the hardware.
+ *
+ * RETURNS:
+ * 0 on success, -ERRNO on failure.
+ */
 int
 i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
 {
 	int error;
-	
+
 	error = i2c_dp_aux_prepare_bus(adapter);
 	if (error)
 		return error;
@@ -206,3 +226,123 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
 	return error;
 }
 EXPORT_SYMBOL(i2c_dp_aux_add_bus);
+
+/* Helpers for DP link training */
+static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+	return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+			     int lane)
+{
+	int i = DP_LANE0_1_STATUS + (lane >> 1);
+	int s = (lane & 1) * 4;
+	u8 l = dp_link_status(link_status, i);
+	return (l >> s) & 0xf;
+}
+
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+			  int lane_count)
+{
+	u8 lane_align;
+	u8 lane_status;
+	int lane;
+
+	lane_align = dp_link_status(link_status,
+				    DP_LANE_ALIGN_STATUS_UPDATED);
+	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+		return false;
+	for (lane = 0; lane < lane_count; lane++) {
+		lane_status = dp_get_lane_status(link_status, lane);
+		if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
+			return false;
+	}
+	return true;
+}
+EXPORT_SYMBOL(drm_dp_channel_eq_ok);
+
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+			      int lane_count)
+{
+	int lane;
+	u8 lane_status;
+
+	for (lane = 0; lane < lane_count; lane++) {
+		lane_status = dp_get_lane_status(link_status, lane);
+		if ((lane_status & DP_LANE_CR_DONE) == 0)
+			return false;
+	}
+	return true;
+}
+EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
+
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+				     int lane)
+{
+	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int s = ((lane & 1) ?
+		 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+		 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+	u8 l = dp_link_status(link_status, i);
+
+	return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
+
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+					  int lane)
+{
+	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int s = ((lane & 1) ?
+		 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+		 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+	u8 l = dp_link_status(link_status, i);
+
+	return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
+
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+		udelay(100);
+	else
+		mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
+
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+		udelay(400);
+	else
+		mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate)
+{
+	switch (link_rate) {
+	case 162000:
+	default:
+		return DP_LINK_BW_1_62;
+	case 270000:
+		return DP_LINK_BW_2_7;
+	case 540000:
+		return DP_LINK_BW_5_4;
+	}
+}
+EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
+
+int drm_dp_bw_code_to_link_rate(u8 link_bw)
+{
+	switch (link_bw) {
+	case DP_LINK_BW_1_62:
+	default:
+		return 162000;
+	case DP_LINK_BW_2_7:
+		return 270000;
+	case DP_LINK_BW_5_4:
+		return 540000;
+	}
+}
+EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
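
A rough sketch of how a driver's clock-recovery loop might combine these
helpers; foo_read_link_status() and foo_set_signal_levels() are
hypothetical driver hooks, and dpcd is assumed to hold the sink's
receiver capabilities read earlier over the aux channel:

    u8 link_status[DP_LINK_STATUS_SIZE];
    int tries;

    for (tries = 0; tries < 5; tries++) {
            /* wait the interval the sink advertises in its DPCD */
            drm_dp_link_train_clock_recovery_delay(dpcd);

            /* read DP_LANE0_1_STATUS and friends from the sink */
            foo_read_link_status(link_status);

            if (drm_dp_clock_recovery_ok(link_status, lane_count))
                    break;  /* proceed to channel equalization */

            /* apply the sink's requested drive adjustments and retry */
            foo_set_signal_levels(
                    drm_dp_get_adjust_request_voltage(link_status, 0) |
                    drm_dp_get_adjust_request_pre_emphasis(link_status, 0));
    }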
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index fadcd44ff196..5a3770fbd770 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -307,12 +307,9 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
 
 static bool drm_edid_is_zero(u8 *in_edid, int length)
 {
-	int i;
-	u32 *raw_edid = (u32 *)in_edid;
+	if (memchr_inv(in_edid, 0, length))
+		return false;
 
-	for (i = 0; i < length / 4; i++)
-		if (*(raw_edid + i) != 0)
-			return false;
 	return true;
 }
 
@@ -1516,6 +1513,26 @@ u8 *drm_find_cea_extension(struct edid *edid)
 }
 EXPORT_SYMBOL(drm_find_cea_extension);
 
+/*
+ * Looks for a CEA mode matching the given drm_display_mode.
+ * Returns its CEA Video ID code, or 0 if not found.
+ */
+u8 drm_match_cea_mode(struct drm_display_mode *to_match)
+{
+	struct drm_display_mode *cea_mode;
+	u8 mode;
+
+	for (mode = 0; mode < drm_num_cea_modes; mode++) {
+		cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];
+
+		if (drm_mode_equal(to_match, cea_mode))
+			return mode + 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_match_cea_mode);
+
 static int
 do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
 {
@@ -1622,7 +1639,7 @@ parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
 	if (len >= 12)
 		connector->audio_latency[1] = db[12];
 
-	DRM_LOG_KMS("HDMI: DVI dual %d, "
+	DRM_DEBUG_KMS("HDMI: DVI dual %d, "
 		    "max TMDS clock %d, "
 		    "latency present %d %d, "
 		    "video latency %d %d, "
@@ -2062,3 +2079,22 @@ int drm_add_modes_noedid(struct drm_connector *connector,
 	return num_modes;
 }
 EXPORT_SYMBOL(drm_add_modes_noedid);
+
+/**
+ * drm_mode_cea_vic - return the CEA-861 VIC of a given mode
+ * @mode: mode
+ *
+ * RETURNS:
+ * The VIC number, 0 in case it's not a CEA-861 mode.
+ */
+uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
+{
+	uint8_t i;
+
+	for (i = 0; i < drm_num_cea_modes; i++)
+		if (drm_mode_equal(mode, &edid_cea_modes[i]))
+			return i + 1;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_cea_vic);
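
A plausible consumer of drm_mode_cea_vic() would be an HDMI driver
filling in the AVI infoframe; the foo_hdmi structure and its
avi_frame.video_code field are purely illustrative:

    static void foo_set_avi_infoframe(struct foo_hdmi *hdmi,
                                      const struct drm_display_mode *mode)
    {
            u8 vic = drm_mode_cea_vic(mode);

            /* 0 means the mode is not a CEA-861 mode; the AVI VIC
             * field must then be left at 0 as well */
            hdmi->avi_frame.video_code = vic;
    }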
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4d58d7e6af3f..954d175bd7fa 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -27,6 +27,8 @@
  *      Dave Airlie <airlied@linux.ie>
  *      Jesse Barnes <jesse.barnes@intel.com>
  */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/sysrq.h>
 #include <linux/slab.h>
@@ -43,6 +45,15 @@ MODULE_LICENSE("GPL and additional rights");
 
 static LIST_HEAD(kernel_fb_helper_list);
 
+/**
+ * DOC: fbdev helpers
+ *
+ * The fb helper functions are useful to provide an fbdev on top of a drm kernel
+ * mode setting driver. They can be used mostly independently from the crtc
+ * helper functions used by many drivers to implement the kernel mode setting
+ * interfaces.
+ */
+
 /* simple single crtc case helper function */
 int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
 {
@@ -95,10 +106,16 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
 			if (mode->force) {
 				const char *s;
 				switch (mode->force) {
-				case DRM_FORCE_OFF: s = "OFF"; break;
-				case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
+				case DRM_FORCE_OFF:
+					s = "OFF";
+					break;
+				case DRM_FORCE_ON_DIGITAL:
+					s = "ON - dig";
+					break;
 				default:
-				case DRM_FORCE_ON: s = "ON"; break;
+				case DRM_FORCE_ON:
+					s = "ON";
+					break;
 				}
 
 				DRM_INFO("forcing %s connector %s\n",
@@ -265,7 +282,7 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
 	if (panic_timeout < 0)
 		return 0;
 
-	printk(KERN_ERR "panic occurred, switching back to text console\n");
+	pr_err("panic occurred, switching back to text console\n");
 	return drm_fb_helper_force_kernel_mode();
 }
 EXPORT_SYMBOL(drm_fb_helper_panic);
@@ -331,7 +348,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
 		for (j = 0; j < fb_helper->connector_count; j++) {
 			connector = fb_helper->connector_info[j]->connector;
 			connector->funcs->dpms(connector, dpms_mode);
-			drm_connector_property_set_value(connector,
+			drm_object_property_set_value(&connector->base,
 				dev->mode_config.dpms_property, dpms_mode);
 		}
 	}
@@ -433,7 +450,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
 	if (!list_empty(&fb_helper->kernel_fb_list)) {
 		list_del(&fb_helper->kernel_fb_list);
 		if (list_empty(&kernel_fb_helper_list)) {
-			printk(KERN_INFO "drm: unregistered panic notifier\n");
+			pr_info("drm: unregistered panic notifier\n");
 			atomic_notifier_chain_unregister(&panic_notifier_list,
 							 &paniced);
 			unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -724,9 +741,9 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 
 	/* if driver picks 8 or 16 by default use that
 	   for both depth/bpp */
-	if (preferred_bpp != sizes.surface_bpp) {
+	if (preferred_bpp != sizes.surface_bpp)
 		sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
-	}
+
 	/* first up get a count of crtcs now in use and new min/maxes width/heights */
 	for (i = 0; i < fb_helper->connector_count; i++) {
 		struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
@@ -794,18 +811,16 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 	info = fb_helper->fbdev;
 
 	/* set the fb pointer */
-	for (i = 0; i < fb_helper->crtc_count; i++) {
+	for (i = 0; i < fb_helper->crtc_count; i++)
 		fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
-	}
 
 	if (new_fb) {
 		info->var.pixclock = 0;
-		if (register_framebuffer(info) < 0) {
+		if (register_framebuffer(info) < 0)
 			return -EINVAL;
-		}
 
-		printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
-		       info->fix.id);
+		dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
+				info->node, info->fix.id);
 
 	} else {
 		drm_fb_helper_set_par(info);
@@ -814,7 +829,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 	/* Switch back to kernel console on panic */
 	/* multi card linked list maybe */
 	if (list_empty(&kernel_fb_helper_list)) {
-		printk(KERN_INFO "drm: registered panic notifier\n");
+		dev_info(fb_helper->dev->dev, "registered panic notifier\n");
 		atomic_notifier_chain_register(&panic_notifier_list,
 					       &paniced);
 		register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -1002,11 +1017,11 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
 {
 	bool enable;
 
-	if (strict) {
+	if (strict)
 		enable = connector->status == connector_status_connected;
-	} else {
+	else
 		enable = connector->status != connector_status_disconnected;
-	}
+
 	return enable;
 }
 
@@ -1191,9 +1206,8 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
 	for (c = 0; c < fb_helper->crtc_count; c++) {
 		crtc = &fb_helper->crtc_info[c];
 
-		if ((encoder->possible_crtcs & (1 << c)) == 0) {
+		if ((encoder->possible_crtcs & (1 << c)) == 0)
 			continue;
-		}
 
 		for (o = 0; o < n; o++)
 			if (best_crtcs[o] == crtc)
@@ -1246,6 +1260,11 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
 			sizeof(struct drm_display_mode *), GFP_KERNEL);
 	enabled = kcalloc(dev->mode_config.num_connector,
 			  sizeof(bool), GFP_KERNEL);
+	if (!crtcs || !modes || !enabled) {
+		DRM_ERROR("Memory allocation failed\n");
+		goto out;
+	}
 
 	drm_enable_connectors(fb_helper, enabled);
 
@@ -1284,6 +1303,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
 		}
 	}
 
+out:
 	kfree(crtcs);
 	kfree(modes);
 	kfree(enabled);
@@ -1291,12 +1311,14 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
 
 /**
  * drm_helper_initial_config - setup a sane initial connector configuration
- * @dev: DRM device
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
  *
  * LOCKING:
- * Called at init time, must take mode config lock.
+ * Called at init time by the driver to set up the @fb_helper initial
+ * configuration, must take the mode config lock.
  *
- * Scan the CRTCs and connectors and try to put together an initial setup.
+ * Scans the CRTCs and connectors and tries to put together an initial setup.
  * At the moment, this is a cloned configuration across all heads with
  * a new framebuffer object as the backing store.
  *
@@ -1319,9 +1341,9 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
 	/*
 	 * we shouldn't end up with no modes here.
 	 */
-	if (count == 0) {
-		printk(KERN_INFO "No connectors reported connected with modes\n");
-	}
+	if (count == 0)
+		dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n");
+
 	drm_setup_crtcs(fb_helper);
 
 	return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
@@ -1330,7 +1352,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
 
 /**
  * drm_fb_helper_hotplug_event - respond to a hotplug notification by
- *                               probing all the outputs attached to the fb.
+ *                               probing all the outputs attached to the fb
  * @fb_helper: the drm_fb_helper
  *
  * LOCKING:
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index c3745c4d46d8..80254547a3f8 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -67,10 +67,8 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
 	hashed_key = hash_long(key, ht->order);
 	DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
 	h_list = &ht->table[hashed_key];
-	hlist_for_each(list, h_list) {
-		entry = hlist_entry(list, struct drm_hash_item, head);
+	hlist_for_each_entry(entry, list, h_list, head)
 		DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
-	}
 }
 
 static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
@@ -83,8 +81,7 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
 
 	hashed_key = hash_long(key, ht->order);
 	h_list = &ht->table[hashed_key];
-	hlist_for_each(list, h_list) {
-		entry = hlist_entry(list, struct drm_hash_item, head);
+	hlist_for_each_entry(entry, list, h_list, head) {
 		if (entry->key == key)
 			return list;
 		if (entry->key > key)
@@ -93,6 +90,24 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
 	return NULL;
 }
 
+static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
+					      unsigned long key)
+{
+	struct drm_hash_item *entry;
+	struct hlist_head *h_list;
+	struct hlist_node *list;
+	unsigned int hashed_key;
+
+	hashed_key = hash_long(key, ht->order);
+	h_list = &ht->table[hashed_key];
+	hlist_for_each_entry_rcu(entry, list, h_list, head) {
+		if (entry->key == key)
+			return list;
+		if (entry->key > key)
+			break;
+	}
+	return NULL;
+}
 
 int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 {
@@ -105,8 +120,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 	hashed_key = hash_long(key, ht->order);
 	h_list = &ht->table[hashed_key];
 	parent = NULL;
-	hlist_for_each(list, h_list) {
-		entry = hlist_entry(list, struct drm_hash_item, head);
+	hlist_for_each_entry(entry, list, h_list, head) {
 		if (entry->key == key)
 			return -EINVAL;
 		if (entry->key > key)
@@ -114,9 +128,9 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 		parent = list;
 	}
 	if (parent) {
-		hlist_add_after(parent, &item->head);
+		hlist_add_after_rcu(parent, &item->head);
 	} else {
-		hlist_add_head(&item->head, h_list);
+		hlist_add_head_rcu(&item->head, h_list);
 	}
 	return 0;
 }
@@ -156,7 +170,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
 {
 	struct hlist_node *list;
 
-	list = drm_ht_find_key(ht, key);
+	list = drm_ht_find_key_rcu(ht, key);
 	if (!list)
 		return -EINVAL;
 
@@ -171,7 +185,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
 
 	list = drm_ht_find_key(ht, key);
 	if (list) {
-		hlist_del_init(list);
+		hlist_del_init_rcu(list);
 		return 0;
 	}
 	return -EINVAL;
@@ -179,7 +193,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
 
 int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 {
-	hlist_del_init(&item->head);
+	hlist_del_init_rcu(&item->head);
 	return 0;
 }
 EXPORT_SYMBOL(drm_ht_remove_item);
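
With the list manipulation switched to the _rcu variants, lookups can
run under rcu_read_lock() instead of an external lock; writers still
need their own serialization. A minimal reader sketch:

    struct drm_hash_item *item;

    rcu_read_lock();
    if (drm_ht_find_item(ht, key, &item) == 0) {
            /* item is only guaranteed to stay valid inside this RCU
             * read-side critical section */
            do_something(item);
    }
    rcu_read_unlock();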
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 23dd97506f28..e77bd8b57df2 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -287,6 +287,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
 		req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
 		req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
 		break;
+	case DRM_CAP_TIMESTAMP_MONOTONIC:
+		req->value = drm_timestamp_monotonic;
+		break;
 	default:
 		return -EINVAL;
 	}
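
Userspace can query the new capability through the GET_CAP ioctl; with
libdrm the check might look like this sketch (error handling trimmed):

    #include <stdint.h>
    #include <xf86drm.h>

    /* returns 1 if vblank/page-flip timestamps use CLOCK_MONOTONIC */
    static int timestamps_are_monotonic(int fd)
    {
            uint64_t cap = 0;

            if (drmGetCap(fd, DRM_CAP_TIMESTAMP_MONOTONIC, &cap) < 0)
                    return 0;  /* old kernel: assume CLOCK_REALTIME */

            return cap == 1;
    }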
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3a3d0ce891b9..19c01ca3cc76 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -106,6 +106,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	s64 diff_ns;
 	int vblrc;
 	struct timeval tvblank;
+	int count = DRM_TIMESTAMP_MAXRETRIES;
 
 	/* Prevent vblank irq processing while disabling vblank irqs,
 	 * so no updates of timestamps or count can happen after we've
@@ -131,7 +132,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	do {
 		dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
 		vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
-	} while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
+	} while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+
+	if (!count)
+		vblrc = 0;
 
 	/* Compute time difference to stored timestamp of last vblank
 	 * as updated by last invocation of drm_handle_vblank() in vblank irq.
@@ -576,7 +580,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 					  unsigned flags,
 					  struct drm_crtc *refcrtc)
 {
-	struct timeval stime, raw_time;
+	ktime_t stime, etime, mono_time_offset;
+	struct timeval tv_etime;
 	struct drm_display_mode *mode;
 	int vbl_status, vtotal, vdisplay;
 	int vpos, hpos, i;
@@ -625,13 +630,15 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 		preempt_disable();
 
 		/* Get system timestamp before query. */
-		do_gettimeofday(&stime);
+		stime = ktime_get();
 
 		/* Get vertical and horizontal scanout pos. vpos, hpos. */
 		vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
 
 		/* Get system timestamp after query. */
-		do_gettimeofday(&raw_time);
+		etime = ktime_get();
+		if (!drm_timestamp_monotonic)
+			mono_time_offset = ktime_get_monotonic_offset();
 
 		preempt_enable();
 
@@ -642,7 +649,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 			return -EIO;
 		}
 
-		duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
+		duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
 
 		/* Accept result with <  max_error nsecs timing uncertainty. */
 		if (duration_ns <= (s64) *max_error)
@@ -689,14 +696,20 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 		vbl_status |= 0x8;
 	}
 
+	if (!drm_timestamp_monotonic)
+		etime = ktime_sub(etime, mono_time_offset);
+
+	/* save this only for debugging purposes */
+	tv_etime = ktime_to_timeval(etime);
 	/* Subtract time delta from raw timestamp to get final
 	 * vblank_time timestamp for end of vblank.
 	 */
-	*vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
+	etime = ktime_sub_ns(etime, delta_ns);
+	*vblank_time = ktime_to_timeval(etime);
 
 	DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
 		  crtc, (int)vbl_status, hpos, vpos,
-		  (long)raw_time.tv_sec, (long)raw_time.tv_usec,
+		  (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
 		  (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
 		  (int)duration_ns/1000, i);
 
@@ -708,6 +721,17 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 }
 EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
 
+static struct timeval get_drm_timestamp(void)
+{
+	ktime_t now;
+
+	now = ktime_get();
+	if (!drm_timestamp_monotonic)
+		now = ktime_sub(now, ktime_get_monotonic_offset());
+
+	return ktime_to_timeval(now);
+}
+
 /**
  * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
  * vblank interval.
@@ -745,9 +769,9 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
 	}
 
 	/* GPU high precision timestamp query unsupported or failed.
-	 * Return gettimeofday timestamp as best estimate.
+	 * Return current monotonic/gettimeofday timestamp as best estimate.
 	 */
-	do_gettimeofday(tvblank);
+	*tvblank = get_drm_timestamp();
 
 	return 0;
 }
@@ -802,6 +826,47 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
 }
 EXPORT_SYMBOL(drm_vblank_count_and_time);
 
+static void send_vblank_event(struct drm_device *dev,
+		struct drm_pending_vblank_event *e,
+		unsigned long seq, struct timeval *now)
+{
+	WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
+	e->event.sequence = seq;
+	e->event.tv_sec = now->tv_sec;
+	e->event.tv_usec = now->tv_usec;
+
+	list_add_tail(&e->base.link,
+		      &e->base.file_priv->event_list);
+	wake_up_interruptible(&e->base.file_priv->event_wait);
+	trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
+					 e->event.sequence);
+}
+
+/**
+ * drm_send_vblank_event - helper to send vblank event after pageflip
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ * @e: the event to send
+ *
+ * Updates the sequence number and timestamp on the event, and sends it to
+ * userspace. The caller must hold dev->event_lock.
+ */
+void drm_send_vblank_event(struct drm_device *dev, int crtc,
+		struct drm_pending_vblank_event *e)
+{
+	struct timeval now;
+	unsigned int seq;
+
+	if (crtc >= 0) {
+		seq = drm_vblank_count_and_time(dev, crtc, &now);
+	} else {
+		seq = 0;
+		now = get_drm_timestamp();
+	}
+	send_vblank_event(dev, e, seq, &now);
+}
+EXPORT_SYMBOL(drm_send_vblank_event);
+
 /**
  * drm_update_vblank_count - update the master vblank counter
  * @dev: DRM device
@@ -936,6 +1001,13 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
 }
 EXPORT_SYMBOL(drm_vblank_put);
 
+/**
+ * drm_vblank_off - disable vblank events on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Disables the vblank interrupt on @crtc and sends out any queued vblank
+ * events before they are dropped. This function takes the vblank and event
+ * locks itself, so callers must not hold them.
+ */
 void drm_vblank_off(struct drm_device *dev, int crtc)
 {
 	struct drm_pending_vblank_event *e, *t;
@@ -949,22 +1021,19 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
 
 	/* Send any queued vblank events, lest the natives grow disquiet */
 	seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+	spin_lock(&dev->event_lock);
 	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
 		if (e->pipe != crtc)
 			continue;
 		DRM_DEBUG("Sending premature vblank event on disable: \
 			  wanted %d, current %d\n",
 			  e->event.sequence, seq);
-
-		e->event.sequence = seq;
-		e->event.tv_sec = now.tv_sec;
-		e->event.tv_usec = now.tv_usec;
+		list_del(&e->base.link);
 		drm_vblank_put(dev, e->pipe);
-		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
-		wake_up_interruptible(&e->base.file_priv->event_wait);
-		trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
-						 e->event.sequence);
+		send_vblank_event(dev, e, seq, &now);
 	}
+	spin_unlock(&dev->event_lock);
 
 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
@@ -1107,15 +1176,9 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
 
 	e->event.sequence = vblwait->request.sequence;
 	if ((seq - vblwait->request.sequence) <= (1 << 23)) {
-		e->event.sequence = seq;
-		e->event.tv_sec = now.tv_sec;
-		e->event.tv_usec = now.tv_usec;
 		drm_vblank_put(dev, pipe);
-		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
-		wake_up_interruptible(&e->base.file_priv->event_wait);
+		send_vblank_event(dev, e, seq, &now);
 		vblwait->reply.sequence = seq;
-		trace_drm_vblank_event_delivered(current->pid, pipe,
-						 vblwait->request.sequence);
 	} else {
 		/* drm_handle_vblank_events will call drm_vblank_put */
 		list_add_tail(&e->base.link, &dev->vblank_event_list);
@@ -1256,14 +1319,9 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
 		DRM_DEBUG("vblank event on %d, current %d\n",
 			  e->event.sequence, seq);
 
-		e->event.sequence = seq;
-		e->event.tv_sec = now.tv_sec;
-		e->event.tv_usec = now.tv_usec;
+		list_del(&e->base.link);
 		drm_vblank_put(dev, e->pipe);
-		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
-		wake_up_interruptible(&e->base.file_priv->event_wait);
-		trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
-						 e->event.sequence);
+		send_vblank_event(dev, e, seq, &now);
 	}
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
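
The intended caller of drm_vblank_off() is a driver's crtc disable
path; a hedged sketch with hypothetical driver names:

    static void foo_crtc_disable(struct drm_crtc *crtc)
    {
            struct drm_device *dev = crtc->dev;
            int pipe = foo_crtc_to_pipe(crtc);  /* hypothetical mapping */

            /* flush out queued vblank events and disable the vblank
             * interrupt before the pipe stops generating them */
            drm_vblank_off(dev, pipe);

            /* driver-specific hardware shutdown follows */
    }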
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 59450f39bf96..d8da30e90db5 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -46,7 +46,7 @@
  *
  * Describe @mode using DRM_DEBUG.
  */
-void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
+void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
 {
 	DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
 			"0x%x 0x%x\n",
@@ -558,7 +558,7 @@ EXPORT_SYMBOL(drm_mode_list_concat);
  * RETURNS:
  * @mode->hdisplay
  */
-int drm_mode_width(struct drm_display_mode *mode)
+int drm_mode_width(const struct drm_display_mode *mode)
 {
 	return mode->hdisplay;
 
@@ -579,7 +579,7 @@ EXPORT_SYMBOL(drm_mode_width);
  * RETURNS:
  * @mode->vdisplay
  */
-int drm_mode_height(struct drm_display_mode *mode)
+int drm_mode_height(const struct drm_display_mode *mode)
 {
 	return mode->vdisplay;
 }
@@ -768,7 +768,7 @@ EXPORT_SYMBOL(drm_mode_duplicate);
  * RETURNS:
  * True if the modes are equal, false otherwise.
  */
-bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
+bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
 {
 	/* do clock check convert to PICOS so fb modes get matched
 	 * the same */
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index ba33144257e5..754bc96e10c7 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -470,7 +470,7 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
 {
 	struct pci_dev *root;
 	int pos;
-	u32 lnkcap, lnkcap2;
+	u32 lnkcap = 0, lnkcap2 = 0;
 
 	*mask = 0;
 	if (!dev->pdev)
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index c236fd27eba6..200e104f1fa0 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -46,16 +46,24 @@ EXPORT_SYMBOL(drm_vblank_offdelay);
 unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
 EXPORT_SYMBOL(drm_timestamp_precision);
 
+/*
+ * Default to using monotonic timestamps for wait-for-vblank and page-flip
+ * completion events.
+ */
+unsigned int drm_timestamp_monotonic = 1;
+
 MODULE_AUTHOR(CORE_AUTHOR);
 MODULE_DESCRIPTION(CORE_DESC);
 MODULE_LICENSE("GPL and additional rights");
 MODULE_PARM_DESC(debug, "Enable debug output");
 MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
 MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
 
 module_param_named(debug, drm_debug, int, 0600);
 module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
 module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
+module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
 
 struct idr drm_minors_idr;
 
@@ -221,20 +229,20 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
 	if (!file_priv->master)
 		return -EINVAL;
 
-	if (!file_priv->minor->master &&
-	    file_priv->minor->master != file_priv->master) {
-		mutex_lock(&dev->struct_mutex);
-		file_priv->minor->master = drm_master_get(file_priv->master);
-		file_priv->is_master = 1;
-		if (dev->driver->master_set) {
-			ret = dev->driver->master_set(dev, file_priv, false);
-			if (unlikely(ret != 0)) {
-				file_priv->is_master = 0;
-				drm_master_put(&file_priv->minor->master);
-			}
+	if (file_priv->minor->master)
+		return -EINVAL;
+
+	mutex_lock(&dev->struct_mutex);
+	file_priv->minor->master = drm_master_get(file_priv->master);
+	file_priv->is_master = 1;
+	if (dev->driver->master_set) {
+		ret = dev->driver->master_set(dev, file_priv, false);
+		if (unlikely(ret != 0)) {
+			file_priv->is_master = 0;
+			drm_master_put(&file_priv->minor->master);
 		}
-		mutex_unlock(&dev->struct_mutex);
 	}
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -492,10 +500,7 @@ void drm_put_dev(struct drm_device *dev)
 	drm_put_minor(&dev->primary);
 
 	list_del(&dev->driver_item);
-	if (dev->devname) {
-		kfree(dev->devname);
-		dev->devname = NULL;
-	}
+	kfree(dev->devname);
 	kfree(dev);
 }
 EXPORT_SYMBOL(drm_put_dev);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 05cd8fe062af..02296653a058 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -182,7 +182,7 @@ static ssize_t dpms_show(struct device *device,
 	uint64_t dpms_status;
 	int ret;
 
-	ret = drm_connector_property_get_value(connector,
+	ret = drm_object_property_get_value(&connector->base,
 					    dev->mode_config.dpms_property,
 					    &dpms_status);
 	if (ret)
@@ -277,7 +277,7 @@ static ssize_t subconnector_show(struct device *device,
 		return 0;
 	}
 
-	ret = drm_connector_property_get_value(connector, prop, &subconnector);
+	ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
 	if (ret)
 		return 0;
 
@@ -318,7 +318,7 @@ static ssize_t select_subconnector_show(struct device *device,
 		return 0;
 	}
 
-	ret = drm_connector_property_get_value(connector, prop, &subconnector);
+	ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
 	if (ret)
 		return 0;
 
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index fc345d4ebb03..1d1f1e5e33f0 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,6 +10,12 @@ config DRM_EXYNOS
 	  Choose this option if you have a Samsung SoC EXYNOS chipset.
 	  If M is selected the module will be called exynosdrm.
 
+config DRM_EXYNOS_IOMMU
+	bool "EXYNOS DRM IOMMU Support"
+	depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+	help
+	  Choose this option if you want to use the IOMMU feature for DRM.
+
 config DRM_EXYNOS_DMABUF
 	bool "EXYNOS DRM DMABUF"
 	depends on DRM_EXYNOS
@@ -39,3 +45,27 @@ config DRM_EXYNOS_G2D
 	depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
 	help
 	  Choose this option if you want to use Exynos G2D for DRM.
+
+config DRM_EXYNOS_IPP
+	bool "Exynos DRM IPP"
+	depends on DRM_EXYNOS
+	help
+	  Choose this option if you want to use the IPP feature for DRM.
+
+config DRM_EXYNOS_FIMC
+	bool "Exynos DRM FIMC"
+	depends on DRM_EXYNOS_IPP
+	help
+	  Choose this option if you want to use Exynos FIMC for DRM.
+
+config DRM_EXYNOS_ROTATOR
+	bool "Exynos DRM Rotator"
+	depends on DRM_EXYNOS_IPP
+	help
+	  Choose this option if you want to use Exynos Rotator for DRM.
+
+config DRM_EXYNOS_GSC
+	bool "Exynos DRM GSC"
+	depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+	help
+	  Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index eb651ca8e2a8..639b49e1ec05 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
 		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
 		exynos_drm_plane.o
 
+exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)	+= exynos_drm_fimd.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI)	+= exynos_hdmi.o exynos_mixer.o \
@@ -15,5 +16,9 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI)	+= exynos_hdmi.o exynos_mixer.o \
 					   exynos_drm_hdmi.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI)	+= exynos_drm_vidi.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_G2D)	+= exynos_drm_g2d.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IPP)	+= exynos_drm_ipp.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC)	+= exynos_drm_fimc.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR)	+= exynos_drm_rotator.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_GSC)	+= exynos_drm_gsc.o
 
 obj-$(CONFIG_DRM_EXYNOS)		+= exynosdrm.o
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 37e6ec704e1d..bef43e0342a6 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -48,6 +48,7 @@ static struct i2c_device_id ddc_idtable[] = {
 	{ },
 };
 
+#ifdef CONFIG_OF
 static struct of_device_id hdmiddc_match_types[] = {
 	{
 		.compatible = "samsung,exynos5-hdmiddc",
@@ -55,12 +56,13 @@ static struct of_device_id hdmiddc_match_types[] = {
 		/* end node */
 	}
 };
+#endif
 
 struct i2c_driver ddc_driver = {
 	.driver = {
 		.name = "exynos-hdmiddc",
 		.owner = THIS_MODULE,
-		.of_match_table = hdmiddc_match_types,
+		.of_match_table = of_match_ptr(hdmiddc_match_types),
 	},
 	.id_table	= ddc_idtable,
 	.probe		= s5p_ddc_probe,
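
The of_match_ptr() wrapper is what makes the #ifdef around
hdmiddc_match_types work: when CONFIG_OF is disabled the match table is
compiled out and the macro degrades to NULL. Roughly (simplified from
linux/of.h):

    #ifdef CONFIG_OF
    #define of_match_ptr(ptr)    (ptr)
    #else
    #define of_match_ptr(ptr)    NULL
    #endif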
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 118c117b3226..9601bad47a2e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -33,89 +33,64 @@
 static int lowlevel_buffer_allocate(struct drm_device *dev,
 		unsigned int flags, struct exynos_drm_gem_buf *buf)
 {
-	dma_addr_t start_addr;
-	unsigned int npages, i = 0;
-	struct scatterlist *sgl;
 	int ret = 0;
+	enum dma_attr attr;
+	unsigned int nr_pages;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (IS_NONCONTIG_BUFFER(flags)) {
-		DRM_DEBUG_KMS("not support allocation type.\n");
-		return -EINVAL;
-	}
-
 	if (buf->dma_addr) {
 		DRM_DEBUG_KMS("already allocated.\n");
 		return 0;
 	}
 
-	if (buf->size >= SZ_1M) {
-		npages = buf->size >> SECTION_SHIFT;
-		buf->page_size = SECTION_SIZE;
-	} else if (buf->size >= SZ_64K) {
-		npages = buf->size >> 16;
-		buf->page_size = SZ_64K;
-	} else {
-		npages = buf->size >> PAGE_SHIFT;
-		buf->page_size = PAGE_SIZE;
-	}
+	init_dma_attrs(&buf->dma_attrs);
 
-	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!buf->sgt) {
-		DRM_ERROR("failed to allocate sg table.\n");
-		return -ENOMEM;
-	}
+	/*
+	 * if EXYNOS_BO_CONTIG is set, a fully physically contiguous memory
+	 * region is allocated; otherwise memory that is as physically
+	 * contiguous as possible is allocated.
+	 */
+	if (flags & EXYNOS_BO_CONTIG)
+		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
 
-	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
-	if (ret < 0) {
-		DRM_ERROR("failed to initialize sg table.\n");
-		kfree(buf->sgt);
-		buf->sgt = NULL;
-		return -ENOMEM;
-	}
+	/*
+	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE is set, use a
+	 * write-combining mapping, otherwise a cacheable one.
+	 */
+	if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
+		attr = DMA_ATTR_WRITE_COMBINE;
+	else
+		attr = DMA_ATTR_NON_CONSISTENT;
 
-	buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
-			&buf->dma_addr, GFP_KERNEL);
-	if (!buf->kvaddr) {
-		DRM_ERROR("failed to allocate buffer.\n");
-		ret = -ENOMEM;
-		goto err1;
-	}
+	dma_set_attr(attr, &buf->dma_attrs);
+	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
 
-	buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
+	buf->pages = dma_alloc_attrs(dev->dev, buf->size,
+			&buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
 	if (!buf->pages) {
-		DRM_ERROR("failed to allocate pages.\n");
+		DRM_ERROR("failed to allocate buffer.\n");
+		return -ENOMEM;
+	}
+
+	nr_pages = buf->size >> PAGE_SHIFT;
+	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
+	if (!buf->sgt) {
+		DRM_ERROR("failed to get sg table.\n");
 		ret = -ENOMEM;
-		goto err2;
+		goto err_free_attrs;
 	}
 
-	sgl = buf->sgt->sgl;
-	start_addr = buf->dma_addr;
-
-	while (i < npages) {
-		buf->pages[i] = phys_to_page(start_addr);
-		sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
-		sg_dma_address(sgl) = start_addr;
-		start_addr += buf->page_size;
-		sgl = sg_next(sgl);
-		i++;
-	}
-
-	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
-			(unsigned long)buf->kvaddr,
+	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
 			(unsigned long)buf->dma_addr,
 			buf->size);
 
 	return ret;
-err2:
-	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
-			(dma_addr_t)buf->dma_addr);
+
+err_free_attrs:
+	dma_free_attrs(dev->dev, buf->size, buf->pages,
+			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
 	buf->dma_addr = (dma_addr_t)NULL;
-err1:
-	sg_free_table(buf->sgt);
-	kfree(buf->sgt);
-	buf->sgt = NULL;
 
 	return ret;
 }
@@ -125,23 +100,12 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
 {
 	DRM_DEBUG_KMS("%s.\n", __FILE__);
 
-	/*
-	 * release only physically continuous memory and
-	 * non-continuous memory would be released by exynos
-	 * gem framework.
-	 */
-	if (IS_NONCONTIG_BUFFER(flags)) {
-		DRM_DEBUG_KMS("not support allocation type.\n");
-		return;
-	}
-
 	if (!buf->dma_addr) {
 		DRM_DEBUG_KMS("dma_addr is invalid.\n");
 		return;
 	}
 
-	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
-			(unsigned long)buf->kvaddr,
+	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
 			(unsigned long)buf->dma_addr,
 			buf->size);
 
@@ -150,11 +114,8 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
 	kfree(buf->sgt);
 	buf->sgt = NULL;
 
-	kfree(buf->pages);
-	buf->pages = NULL;
-
-	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
-				(dma_addr_t)buf->dma_addr);
+	dma_free_attrs(dev->dev, buf->size, buf->pages,
+				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
 	buf->dma_addr = (dma_addr_t)NULL;
 }
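
Stripped of the exynos specifics, the allocation above follows the
generic dma attributes pattern; a minimal sketch, assuming a struct
device *dev and a size are at hand:

    struct dma_attrs attrs;
    dma_addr_t dma_addr;
    void *cookie;

    init_dma_attrs(&attrs);
    dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);
    dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

    /* with DMA_ATTR_NO_KERNEL_MAPPING the return value is an opaque
     * cookie (a page pointer array on ARM), not a kernel virtual address */
    cookie = dma_alloc_attrs(dev, size, &dma_addr, GFP_KERNEL, &attrs);
    if (!cookie)
            return -ENOMEM;

    /* ... hand dma_addr to the device ... */

    dma_free_attrs(dev, size, cookie, dma_addr, &attrs);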
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 3388e4eb4ba2..25cf16285033 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
 void exynos_drm_fini_buf(struct drm_device *dev,
 				struct exynos_drm_gem_buf *buffer);
 
-/* allocate physical memory region and setup sgt and pages. */
+/* allocate physical memory region and set up sgt. */
 int exynos_drm_alloc_buf(struct drm_device *dev,
 				struct exynos_drm_gem_buf *buf,
 				unsigned int flags);
 
-/* release physical memory region, sgt and pages. */
+/* release physical memory region and sgt. */
 void exynos_drm_free_buf(struct drm_device *dev,
 				unsigned int flags,
 				struct exynos_drm_gem_buf *buffer);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index fce245f64c4f..2efa4b031d73 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -236,16 +236,21 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
 			goto out;
 		}
 
+		spin_lock_irq(&dev->event_lock);
 		list_add_tail(&event->base.link,
 				&dev_priv->pageflip_event_list);
+		spin_unlock_irq(&dev->event_lock);
 
 		crtc->fb = fb;
 		ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
 						    NULL);
 		if (ret) {
 			crtc->fb = old_fb;
+
+			spin_lock_irq(&dev->event_lock);
 			drm_vblank_put(dev, exynos_crtc->pipe);
 			list_del(&event->base.link);
+			spin_unlock_irq(&dev->event_lock);
 
 			goto out;
 		}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index fae1f2ec886c..61d5a8402eb8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -30,70 +30,108 @@
 
 #include <linux/dma-buf.h>
 
-static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
-		unsigned int page_size)
+struct exynos_drm_dmabuf_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dir;
+};
+
+static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
+					struct device *dev,
+					struct dma_buf_attachment *attach)
 {
-	struct sg_table *sgt = NULL;
-	struct scatterlist *sgl;
-	int i, ret;
+	struct exynos_drm_dmabuf_attachment *exynos_attach;
 
-	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
-	if (!sgt)
-		goto out;
+	exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
+	if (!exynos_attach)
+		return -ENOMEM;
 
-	ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
-	if (ret)
-		goto err_free_sgt;
+	exynos_attach->dir = DMA_NONE;
+	attach->priv = exynos_attach;
 
-	if (page_size < PAGE_SIZE)
-		page_size = PAGE_SIZE;
+	return 0;
+}
 
-	for_each_sg(sgt->sgl, sgl, nr_pages, i)
-		sg_set_page(sgl, pages[i], page_size, 0);
+static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
+					struct dma_buf_attachment *attach)
+{
+	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
+	struct sg_table *sgt;
 
-	return sgt;
+	if (!exynos_attach)
+		return;
 
-err_free_sgt:
-	kfree(sgt);
-	sgt = NULL;
-out:
-	return NULL;
+	sgt = &exynos_attach->sgt;
+
+	if (exynos_attach->dir != DMA_NONE)
+		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+				exynos_attach->dir);
+
+	sg_free_table(sgt);
+	kfree(exynos_attach);
+	attach->priv = NULL;
 }
 
 static struct sg_table *
 		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
 					enum dma_data_direction dir)
 {
+	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
 	struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
 	struct drm_device *dev = gem_obj->base.dev;
 	struct exynos_drm_gem_buf *buf;
+	struct scatterlist *rd, *wr;
 	struct sg_table *sgt = NULL;
-	unsigned int npages;
-	int nents;
+	unsigned int i;
+	int nents, ret;
 
 	DRM_DEBUG_PRIME("%s\n", __FILE__);
 
-	mutex_lock(&dev->struct_mutex);
+	if (WARN_ON(dir == DMA_NONE))
+		return ERR_PTR(-EINVAL);
+
+	/* just return the cached sgt if already mapped with this direction. */
+	if (exynos_attach->dir == dir)
+		return &exynos_attach->sgt;
+
+	/* remapping with a different direction is not allowed. */
+	if (WARN_ON(exynos_attach->dir != DMA_NONE))
+		return ERR_PTR(-EBUSY);
 
 	buf = gem_obj->buffer;
+	if (!buf) {
+		DRM_ERROR("buffer is null.\n");
+		return ERR_PTR(-ENOMEM);
+	}
 
-	/* there should always be pages allocated. */
-	if (!buf->pages) {
-		DRM_ERROR("pages is null.\n");
+	sgt = &exynos_attach->sgt;
+
+	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
+	if (ret) {
+		DRM_ERROR("failed to alloc sgt.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	rd = buf->sgt->sgl;
+	wr = sgt->sgl;
+	for (i = 0; i < sgt->orig_nents; ++i) {
+		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+		rd = sg_next(rd);
+		wr = sg_next(wr);
+	}
+
+	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+	if (!nents) {
+		DRM_ERROR("failed to map sgl with iommu.\n");
+		sgt = ERR_PTR(-EIO);
 		goto err_unlock;
 	}
 
-	npages = buf->size / buf->page_size;
+	exynos_attach->dir = dir;
+	attach->priv = exynos_attach;
 
-	sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
-	if (!sgt) {
-		DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
-		goto err_unlock;
-	}
-	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
-
-	DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
-			npages, buf->size, buf->page_size);
+	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
 
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
@@ -104,10 +142,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 						struct sg_table *sgt,
 						enum dma_data_direction dir)
 {
-	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
-	sg_free_table(sgt);
-	kfree(sgt);
-	sgt = NULL;
+	/* Nothing to do. */
 }
 
 static void exynos_dmabuf_release(struct dma_buf *dmabuf)
@@ -169,6 +204,8 @@ static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 }
 
 static struct dma_buf_ops exynos_dmabuf_ops = {
+	.attach			= exynos_gem_attach_dma_buf,
+	.detach			= exynos_gem_detach_dma_buf,
 	.map_dma_buf		= exynos_gem_map_dma_buf,
 	.unmap_dma_buf		= exynos_gem_unmap_dma_buf,
 	.kmap			= exynos_gem_dmabuf_kmap,
@@ -196,7 +233,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 	struct scatterlist *sgl;
 	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct exynos_drm_gem_buf *buffer;
-	struct page *page;
 	int ret;
 
 	DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -233,38 +269,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 		goto err_unmap_attach;
 	}
 
-	buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
-	if (!buffer->pages) {
-		DRM_ERROR("failed to allocate pages.\n");
+	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
+	if (!exynos_gem_obj) {
 		ret = -ENOMEM;
 		goto err_free_buffer;
 	}
 
-	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
-	if (!exynos_gem_obj) {
-		ret = -ENOMEM;
-		goto err_free_pages;
-	}
-
 	sgl = sgt->sgl;
 
-	if (sgt->nents == 1) {
-		buffer->dma_addr = sg_dma_address(sgt->sgl);
-		buffer->size = sg_dma_len(sgt->sgl);
+	buffer->size = dma_buf->size;
+	buffer->dma_addr = sg_dma_address(sgl);
 
+	if (sgt->nents == 1) {
 		/* always physically continuous memory if sgt->nents is 1. */
 		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
 	} else {
-		unsigned int i = 0;
-
-		buffer->dma_addr = sg_dma_address(sgl);
-		while (i < sgt->nents) {
-			buffer->pages[i] = sg_page(sgl);
-			buffer->size += sg_dma_len(sgl);
-			sgl = sg_next(sgl);
-			i++;
-		}
-
+		/*
+		 * this case could be CONTIG or NONCONTIG type but for now
+		 * it is set to NONCONTIG.
+		 * TODO: find a way for the exporter to notify the importer
+		 * of its buffer type.
+		 */
 		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
 	}
 
@@ -277,9 +302,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 
 	return &exynos_gem_obj->base;
 
-err_free_pages:
-	kfree(buffer->pages);
-	buffer->pages = NULL;
 err_free_buffer:
 	kfree(buffer);
 	buffer = NULL;
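
From the importer's point of view, these callbacks are driven through
the core dma-buf entry points; a rough sketch of the sequence (error
handling trimmed):

    struct dma_buf_attachment *attach;
    struct sg_table *sgt;

    attach = dma_buf_attach(dmabuf, dev);  /* -> exynos_gem_attach_dma_buf */

    /* the first map performs the real dma_map_sg(); a second map with
     * the same direction just returns the cached table */
    sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);

    /* ... access the buffer through sgt ... */

    dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
    dma_buf_detach(dmabuf, attach);        /* -> exynos_gem_detach_dma_buf */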
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 1de7baafddd0..e0a8e8024b01 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -40,6 +40,8 @@
 #include "exynos_drm_vidi.h"
 #include "exynos_drm_dmabuf.h"
 #include "exynos_drm_g2d.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
 
 #define DRIVER_NAME	"exynos"
 #define DRIVER_DESC	"Samsung SoC DRM"
@@ -49,6 +51,9 @@
 
 #define VBLANK_OFF_DELAY	50000
 
+/* platform device pointer for the exynos drm device. */
+static struct platform_device *exynos_drm_pdev;
+
 static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 {
 	struct exynos_drm_private *private;
@@ -66,6 +71,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	INIT_LIST_HEAD(&private->pageflip_event_list);
 	dev->dev_private = (void *)private;
 
+	/*
+	 * create a mapping to manage the iommu table and store a pointer
+	 * to the iommu mapping structure in the iommu_mapping member of
+	 * the private data. this mapping can also be used to check whether
+	 * iommu is supported or not.
+	 */
+	ret = drm_create_iommu_mapping(dev);
+	if (ret < 0) {
+		DRM_ERROR("failed to create iommu mapping.\n");
+		goto err_crtc;
+	}
+
 	drm_mode_config_init(dev);
 
 	/* init kms poll for handling hpd */
@@ -80,7 +97,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	for (nr = 0; nr < MAX_CRTC; nr++) {
 		ret = exynos_drm_crtc_create(dev, nr);
 		if (ret)
-			goto err_crtc;
+			goto err_release_iommu_mapping;
 	}
 
 	for (nr = 0; nr < MAX_PLANE; nr++) {
@@ -89,12 +106,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 
 		plane = exynos_plane_init(dev, possible_crtcs, false);
 		if (!plane)
-			goto err_crtc;
+			goto err_release_iommu_mapping;
 	}
 
 	ret = drm_vblank_init(dev, MAX_CRTC);
 	if (ret)
-		goto err_crtc;
+		goto err_release_iommu_mapping;
 
 	/*
 	 * probe sub drivers such as display controller and hdmi driver,
@@ -126,6 +143,8 @@ err_drm_device:
 	exynos_drm_device_unregister(dev);
 err_vblank:
 	drm_vblank_cleanup(dev);
+err_release_iommu_mapping:
+	drm_release_iommu_mapping(dev);
 err_crtc:
 	drm_mode_config_cleanup(dev);
 	kfree(private);
@@ -142,6 +161,8 @@ static int exynos_drm_unload(struct drm_device *dev)
 	drm_vblank_cleanup(dev);
 	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
+
+	drm_release_iommu_mapping(dev);
 	kfree(dev->dev_private);
 
 	dev->dev_private = NULL;
@@ -229,6 +250,14 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
 			exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
 			exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
+			exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
+			exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
+			exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
+			exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
 };
 
 static const struct file_operations exynos_drm_driver_fops = {
@@ -279,6 +308,7 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
 {
 	DRM_DEBUG_DRIVER("%s\n", __FILE__);
 
+	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 	exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
 
 	return drm_platform_init(&exynos_drm_driver, pdev);
@@ -324,6 +354,10 @@ static int __init exynos_drm_init(void)
 	ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
 	if (ret < 0)
 		goto out_common_hdmi;
+
+	ret = exynos_platform_device_hdmi_register();
+	if (ret < 0)
+		goto out_common_hdmi_dev;
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_VIDI
@@ -338,24 +372,80 @@ static int __init exynos_drm_init(void)
 		goto out_g2d;
 #endif
 
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+	ret = platform_driver_register(&fimc_driver);
+	if (ret < 0)
+		goto out_fimc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+	ret = platform_driver_register(&rotator_driver);
+	if (ret < 0)
+		goto out_rotator;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+	ret = platform_driver_register(&gsc_driver);
+	if (ret < 0)
+		goto out_gsc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+	ret = platform_driver_register(&ipp_driver);
+	if (ret < 0)
+		goto out_ipp;
+#endif
+
 	ret = platform_driver_register(&exynos_drm_platform_driver);
 	if (ret < 0)
+		goto out_drm;
+
+	exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
+				NULL, 0);
+	if (IS_ERR_OR_NULL(exynos_drm_pdev)) {
+		ret = PTR_ERR(exynos_drm_pdev);
 		goto out;
+	}
 
 	return 0;
 
 out:
+	platform_driver_unregister(&exynos_drm_platform_driver);
+
+out_drm:
+#ifdef CONFIG_DRM_EXYNOS_IPP
+	platform_driver_unregister(&ipp_driver);
+out_ipp:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+	platform_driver_unregister(&gsc_driver);
+out_gsc:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+	platform_driver_unregister(&rotator_driver);
+out_rotator:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+	platform_driver_unregister(&fimc_driver);
+out_fimc:
+#endif
+
 #ifdef CONFIG_DRM_EXYNOS_G2D
 	platform_driver_unregister(&g2d_driver);
 out_g2d:
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_VIDI
-out_vidi:
 	platform_driver_unregister(&vidi_driver);
+out_vidi:
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_HDMI
+	exynos_platform_device_hdmi_unregister();
+out_common_hdmi_dev:
 	platform_driver_unregister(&exynos_drm_common_hdmi_driver);
 out_common_hdmi:
 	platform_driver_unregister(&mixer_driver);
@@ -375,13 +465,32 @@ static void __exit exynos_drm_exit(void)
 {
 	DRM_DEBUG_DRIVER("%s\n", __FILE__);
 
+	platform_device_unregister(exynos_drm_pdev);
+
 	platform_driver_unregister(&exynos_drm_platform_driver);
 
+#ifdef CONFIG_DRM_EXYNOS_IPP
+	platform_driver_unregister(&ipp_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+	platform_driver_unregister(&gsc_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+	platform_driver_unregister(&rotator_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+	platform_driver_unregister(&fimc_driver);
+#endif
+
 #ifdef CONFIG_DRM_EXYNOS_G2D
 	platform_driver_unregister(&g2d_driver);
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_HDMI
+	exynos_platform_device_hdmi_unregister();
 	platform_driver_unregister(&exynos_drm_common_hdmi_driver);
 	platform_driver_unregister(&mixer_driver);
 	platform_driver_unregister(&hdmi_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index a34231036496..f5a97745bf93 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -74,8 +74,6 @@ enum exynos_drm_output_type {
  * @commit: apply hardware specific overlay data to registers.
  * @enable: enable hardware specific overlay.
  * @disable: disable hardware specific overlay.
- * @wait_for_vblank: wait for vblank interrupt to make sure that
- *	hardware overlay is disabled.
  */
 struct exynos_drm_overlay_ops {
 	void (*mode_set)(struct device *subdrv_dev,
@@ -83,7 +81,6 @@ struct exynos_drm_overlay_ops {
 	void (*commit)(struct device *subdrv_dev, int zpos);
 	void (*enable)(struct device *subdrv_dev, int zpos);
 	void (*disable)(struct device *subdrv_dev, int zpos);
-	void (*wait_for_vblank)(struct device *subdrv_dev);
 };
 
 /*
@@ -110,7 +107,6 @@ struct exynos_drm_overlay_ops {
  * @pixel_format: fourcc pixel format of this overlay
  * @dma_addr: array of bus(accessed by dma) address to the memory region
  *	      allocated for a overlay.
- * @vaddr: array of virtual memory addresss to this overlay.
  * @zpos: order of overlay layer(z position).
  * @default_win: a window to be enabled.
  * @color_key: color key on or off.
@@ -142,7 +138,6 @@ struct exynos_drm_overlay {
 	unsigned int pitch;
 	uint32_t pixel_format;
 	dma_addr_t dma_addr[MAX_FB_BUFFER];
-	void __iomem *vaddr[MAX_FB_BUFFER];
 	int zpos;
 
 	bool default_win;
@@ -186,6 +181,8 @@ struct exynos_drm_display_ops {
  * @commit: set current hw specific display mode to hw.
  * @enable_vblank: specific driver callback for enabling vblank interrupt.
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
+ * @wait_for_vblank: wait for vblank interrupt to make sure that
+ *	hardware overlay is updated.
  */
 struct exynos_drm_manager_ops {
 	void (*dpms)(struct device *subdrv_dev, int mode);
@@ -200,6 +197,7 @@ struct exynos_drm_manager_ops {
 	void (*commit)(struct device *subdrv_dev);
 	int (*enable_vblank)(struct device *subdrv_dev);
 	void (*disable_vblank)(struct device *subdrv_dev);
+	void (*wait_for_vblank)(struct device *subdrv_dev);
 };
 
 /*
@@ -231,16 +229,28 @@ struct exynos_drm_g2d_private {
 	struct device		*dev;
 	struct list_head	inuse_cmdlist;
 	struct list_head	event_list;
-	struct list_head	gem_list;
-	unsigned int		gem_nr;
+	struct list_head	userptr_list;
+};
+
+struct exynos_drm_ipp_private {
+	struct device	*dev;
+	struct list_head	event_list;
 };
 
 struct drm_exynos_file_private {
 	struct exynos_drm_g2d_private	*g2d_priv;
+	struct exynos_drm_ipp_private	*ipp_priv;
 };
 
 /*
  * Exynos drm private structure.
+ *
+ * @da_start: start address of the device address space.
+ *	with iommu, the device address space starts from this address,
+ *	otherwise the default one is used.
+ * @da_space_size: size of the device address space.
+ *	if 0, the default value is used.
+ * @da_space_order: order of the device address space.
  */
 struct exynos_drm_private {
 	struct drm_fb_helper *fb_helper;
@@ -255,6 +265,10 @@ struct exynos_drm_private {
 	struct drm_crtc *crtc[MAX_CRTC];
 	struct drm_property *plane_zpos_property;
 	struct drm_property *crtc_mode_property;
+
+	unsigned long da_start;
+	unsigned long da_space_size;
+	unsigned long da_space_order;
 };
 
 /*
@@ -318,10 +332,25 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
 int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
 void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
 
+/*
+ * this function registers the exynos drm hdmi platform device. It ensures
+ * that only one instance of the device is created.
+ */
+extern int exynos_platform_device_hdmi_register(void);
+
+/*
+ * this function unregisters exynos drm hdmi platform device if it exists.
+ */
+void exynos_platform_device_hdmi_unregister(void);
+
 extern struct platform_driver fimd_driver;
 extern struct platform_driver hdmi_driver;
 extern struct platform_driver mixer_driver;
 extern struct platform_driver exynos_drm_common_hdmi_driver;
 extern struct platform_driver vidi_driver;
 extern struct platform_driver g2d_driver;
+extern struct platform_driver fimc_driver;
+extern struct platform_driver rotator_driver;
+extern struct platform_driver gsc_driver;
+extern struct platform_driver ipp_driver;
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index f2df06c603f7..301485215a70 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -234,6 +234,32 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
 	exynos_encoder->dpms = DRM_MODE_DPMS_ON;
 }
 
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
+{
+	struct exynos_drm_encoder *exynos_encoder;
+	struct exynos_drm_manager_ops *ops;
+	struct drm_device *dev = fb->dev;
+	struct drm_encoder *encoder;
+
+	/*
+	 * make sure that overlay data are updated to real hardware
+	 * for all encoders.
+	 */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		exynos_encoder = to_exynos_encoder(encoder);
+		ops = exynos_encoder->manager->ops;
+
+		/*
+		 * wait for vblank interrupt
+		 * - this makes sure that overlay data are updated to
+		 *	real hardware.
+		 */
+		if (ops->wait_for_vblank)
+			ops->wait_for_vblank(exynos_encoder->manager->dev);
+	}
+}
+
 static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
 {
 	struct drm_plane *plane;
@@ -505,14 +531,4 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
 
 	if (overlay_ops && overlay_ops->disable)
 		overlay_ops->disable(manager->dev, zpos);
-
-	/*
-	 * wait for vblank interrupt
-	 * - this makes sure that hardware overlay is disabled to avoid
-	 * for the dma accesses to memory after gem buffer was released
-	 * because the setting for disabling the overlay will be updated
-	 * at vsync.
-	 */
-	if (overlay_ops && overlay_ops->wait_for_vblank)
-		overlay_ops->wait_for_vblank(manager->dev);
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 6470d9ddf5a1..88bb25a2a917 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -46,5 +46,6 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
 
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 4ef4cd3f9936..5426cc5a5e8d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -30,10 +30,13 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <uapi/drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+#include "exynos_drm_encoder.h"
 
 #define to_exynos_fb(x)	container_of(x, struct exynos_drm_fb, fb)
 
@@ -50,6 +53,32 @@ struct exynos_drm_fb {
 	struct exynos_drm_gem_obj	*exynos_gem_obj[MAX_FB_BUFFER];
 };
 
+static int check_fb_gem_memory_type(struct drm_device *drm_dev,
+				struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+	unsigned int flags;
+
+	/*
+	 * if exynos drm driver supports iommu then framebuffer can use
+	 * all the buffer types.
+	 */
+	if (is_drm_iommu_supported(drm_dev))
+		return 0;
+
+	flags = exynos_gem_obj->flags;
+
+	/*
+	 * without iommu support, physically non-contiguous memory cannot
+	 * be used for a framebuffer.
+	 */
+	if (IS_NONCONTIG_BUFFER(flags)) {
+		DRM_ERROR("cannot use this gem memory type for fb.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
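For context, a userspace sketch of an allocation that would trip this check. This is a hypothetical, minimal example that assumes the exynos GEM uapi of this era (struct drm_exynos_gem_create, EXYNOS_BO_NONCONTIG, DRM_IOCTL_EXYNOS_GEM_CREATE) and the usual /dev/dri/card0 node; a buffer created with the noncontig flag can back a framebuffer only when the driver has iommu support:

#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>
#include <drm/exynos_drm.h>

int main(void)
{
	struct drm_exynos_gem_create req = {
		.size = 1920 * 1080 * 4,
		/* physically non-contiguous backing; without iommu,
		 * check_fb_gem_memory_type() rejects this for scanout */
		.flags = EXYNOS_BO_NONCONTIG,
	};
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0 || drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req))
		return 1;
	printf("gem handle %u\n", req.handle);
	return 0;
}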
 static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 {
 	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
@@ -57,6 +86,9 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
+	/* make sure that overlay data are updated before releasing fb. */
+	exynos_drm_encoder_complete_scanout(fb);
+
 	drm_framebuffer_cleanup(fb);
 
 	for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
@@ -128,23 +160,32 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
 			    struct drm_gem_object *obj)
 {
 	struct exynos_drm_fb *exynos_fb;
+	struct exynos_drm_gem_obj *exynos_gem_obj;
 	int ret;
 
+	exynos_gem_obj = to_exynos_gem_obj(obj);
+
+	ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+	if (ret < 0) {
+		DRM_ERROR("cannot use this gem memory type for fb.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
 	if (!exynos_fb) {
 		DRM_ERROR("failed to allocate exynos drm framebuffer\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
+	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+	exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
+
 	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
 	if (ret) {
 		DRM_ERROR("failed to initialize framebuffer\n");
 		return ERR_PTR(ret);
 	}
 
-	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
-	exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
-
 	return &exynos_fb->fb;
 }
 
@@ -190,9 +231,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		      struct drm_mode_fb_cmd2 *mode_cmd)
 {
 	struct drm_gem_object *obj;
-	struct drm_framebuffer *fb;
 	struct exynos_drm_fb *exynos_fb;
-	int i;
+	int i, ret;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -202,30 +242,56 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		return ERR_PTR(-ENOENT);
 	}
 
-	fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
-	if (IS_ERR(fb)) {
-		drm_gem_object_unreference_unlocked(obj);
-		return fb;
+	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
+	if (!exynos_fb) {
+		DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+		return ERR_PTR(-ENOMEM);
 	}
 
-	exynos_fb = to_exynos_fb(fb);
+	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+	exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
 	exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
 
 	DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
 
 	for (i = 1; i < exynos_fb->buf_cnt; i++) {
+		struct exynos_drm_gem_obj *exynos_gem_obj;
+		int ret;
+
 		obj = drm_gem_object_lookup(dev, file_priv,
 				mode_cmd->handles[i]);
 		if (!obj) {
 			DRM_ERROR("failed to lookup gem object\n");
-			exynos_drm_fb_destroy(fb);
+			kfree(exynos_fb);
 			return ERR_PTR(-ENOENT);
 		}
 
+		exynos_gem_obj = to_exynos_gem_obj(obj);
+
+		ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+		if (ret < 0) {
+			DRM_ERROR("cannot use this gem memory type for fb.\n");
+			kfree(exynos_fb);
+			return ERR_PTR(ret);
+		}
+
 		exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
 	}
 
-	return fb;
+	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+	if (ret) {
+		for (i = 0; i < exynos_fb->buf_cnt; i++) {
+			struct exynos_drm_gem_obj *gem_obj;
+
+			gem_obj = exynos_fb->exynos_gem_obj[i];
+			drm_gem_object_unreference_unlocked(&gem_obj->base);
+		}
+
+		kfree(exynos_fb);
+		return ERR_PTR(ret);
+	}
+
+	return &exynos_fb->fb;
 }
 
 struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
@@ -243,9 +309,7 @@ struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
 	if (!buffer)
 		return NULL;
 
-	DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
-			(unsigned long)buffer->kvaddr,
-			(unsigned long)buffer->dma_addr);
+	DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
 
 	return buffer;
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e7466c4414cb..f433eb7533a9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -46,8 +46,38 @@ struct exynos_drm_fbdev {
 	struct exynos_drm_gem_obj	*exynos_gem_obj;
 };
 
+static int exynos_drm_fb_mmap(struct fb_info *info,
+			struct vm_area_struct *vma)
+{
+	struct drm_fb_helper *helper = info->par;
+	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
+	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+	struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+	unsigned long vm_size;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
+	vm_size = vma->vm_end - vma->vm_start;
+
+	if (vm_size > buffer->size)
+		return -EINVAL;
+
+	ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
+		buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+	if (ret < 0) {
+		DRM_ERROR("failed to mmap.\n");
+		return ret;
+	}
+
+	return 0;
+}
+
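From userspace this path is reached through a plain fbdev mmap(). A minimal sketch, assuming the device shows up as /dev/fb0:

#include <fcntl.h>
#include <linux/fb.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	struct fb_fix_screeninfo fix;
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0 || ioctl(fd, FBIOGET_FSCREENINFO, &fix))
		return 1;

	/* this mmap() lands in exynos_drm_fb_mmap() above; requests
	 * larger than the backing buffer are rejected with -EINVAL */
	void *fbmem = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, 0);
	if (fbmem == MAP_FAILED)
		return 1;

	/* ... draw into fbmem ... */
	munmap(fbmem, fix.smem_len);
	close(fd);
	return 0;
}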
 static struct fb_ops exynos_drm_fb_ops = {
 	.owner		= THIS_MODULE,
+	.fb_mmap        = exynos_drm_fb_mmap,
 	.fb_fillrect	= cfb_fillrect,
 	.fb_copyarea	= cfb_copyarea,
 	.fb_imageblit	= cfb_imageblit,
@@ -79,6 +109,17 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 		return -EFAULT;
 	}
 
+	/* map pages into kernel virtual address space. */
+	if (!buffer->kvaddr) {
+		unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
+		buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
+					pgprot_writecombine(PAGE_KERNEL));
+		if (!buffer->kvaddr) {
+			DRM_ERROR("failed to map pages to kernel space.\n");
+			return -EIO;
+		}
+	}
+
 	/* buffer count to framebuffer always is 1 at booting time. */
 	exynos_drm_fb_set_buf_cnt(fb, 1);
 
@@ -87,8 +128,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 
 	dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
 	fbi->screen_base = buffer->kvaddr + offset;
-	fbi->fix.smem_start = (unsigned long)(page_to_phys(buffer->pages[0]) +
-				offset);
+	fbi->fix.smem_start = (unsigned long)
+			(page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
 	fbi->screen_size = size;
 	fbi->fix.smem_len = size;
 
@@ -134,7 +175,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 	exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
 	if (IS_ERR(exynos_gem_obj)) {
 		ret = PTR_ERR(exynos_gem_obj);
-		goto out;
+		goto err_release_framebuffer;
 	}
 
 	exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
@@ -144,7 +185,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 	if (IS_ERR_OR_NULL(helper->fb)) {
 		DRM_ERROR("failed to create drm framebuffer.\n");
 		ret = PTR_ERR(helper->fb);
-		goto out;
+		goto err_destroy_gem;
 	}
 
 	helper->fbdev = fbi;
@@ -156,14 +197,24 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
 	if (ret) {
 		DRM_ERROR("failed to allocate cmap.\n");
-		goto out;
+		goto err_destroy_framebuffer;
 	}
 
 	ret = exynos_drm_fbdev_update(helper, helper->fb);
-	if (ret < 0) {
-		fb_dealloc_cmap(&fbi->cmap);
-		goto out;
-	}
+	if (ret < 0)
+		goto err_dealloc_cmap;
+
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+
+err_dealloc_cmap:
+	fb_dealloc_cmap(&fbi->cmap);
+err_destroy_framebuffer:
+	drm_framebuffer_cleanup(helper->fb);
+err_destroy_gem:
+	exynos_drm_gem_destroy(exynos_gem_obj);
+err_release_framebuffer:
+	framebuffer_release(fbi);
 
 /*
  * if failed, all resources allocated above would be released by
@@ -265,8 +316,13 @@ err_init:
 static void exynos_drm_fbdev_destroy(struct drm_device *dev,
 				      struct drm_fb_helper *fb_helper)
 {
+	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
+	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
 	struct drm_framebuffer *fb;
 
+	if (exynos_gem_obj->buffer->kvaddr)
+		vunmap(exynos_gem_obj->buffer->kvaddr);
+
 	/* release drm framebuffer and real buffer */
 	if (fb_helper->fb && fb_helper->fb->funcs) {
 		fb = fb_helper->fb;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
new file mode 100644
index 000000000000..61ea24296b52
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -0,0 +1,2001 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *	Eunchul Kim <chulspro.kim@samsung.com>
+ *	Jinyoung Jeon <jy0.jeon@samsung.com>
+ *	Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-fimc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_fimc.h"
+
+/*
+ * FIMC stands for Fully Interactive Mobile Camera and
+ * supports image scaler/rotator and input/output DMA operations.
+ * Input DMA reads image data from memory.
+ * Output DMA writes image data to memory.
+ * FIMC supports image rotation and image effect functions.
+ *
+ * M2M operation: supports crop/scale/rotation/csc and so on.
+ * Memory ----> FIMC H/W ----> Memory.
+ * Writeback operation: supports cloned screen with FIMD.
+ * FIMD ----> FIMC H/W ----> Memory.
+ * Output operation: supports direct display using local path.
+ * Memory ----> FIMC H/W ----> FIMD.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size width and height.
+ * 4. add check_prepare api for correct register setting.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define FIMC_MAX_DEVS	4
+#define FIMC_MAX_SRC	2
+#define FIMC_MAX_DST	32
+#define FIMC_SHFACTOR	10
+#define FIMC_BUF_STOP	1
+#define FIMC_BUF_START	2
+#define FIMC_REG_SZ		32
+#define FIMC_WIDTH_ITU_709	1280
+#define FIMC_REFRESH_MAX	60
+#define FIMC_REFRESH_MIN	12
+#define FIMC_CROP_MAX	8192
+#define FIMC_CROP_MIN	32
+#define FIMC_SCALE_MAX	4224
+#define FIMC_SCALE_MIN	32
+
+#define get_fimc_context(dev)	platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv)	container_of(ippdrv,\
+					struct fimc_context, ippdrv)
+#define fimc_read(offset)		readl(ctx->regs + (offset))
+#define fimc_write(cfg, offset)	writel(cfg, ctx->regs + (offset))
+
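One thing worth spelling out about these accessors: fimc_read()/fimc_write() pick up the local variable ctx implicitly, so they are only usable inside functions that have a struct fimc_context *ctx in scope. For example:

	u32 status = fimc_read(EXYNOS_CISTATUS);
	/* expands to: readl(ctx->regs + (EXYNOS_CISTATUS)) */

	fimc_write(0x0, EXYNOS_CIFCNTSEQ);
	/* expands to: writel(0x0, ctx->regs + (EXYNOS_CIFCNTSEQ)) */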
+enum fimc_wb {
+	FIMC_WB_NONE,
+	FIMC_WB_A,
+	FIMC_WB_B,
+};
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @bypass: scaler bypass mode.
+ * @up_h: horizontal scale up.
+ * @up_v: vertical scale up.
+ * @hratio: horizontal ratio.
+ * @vratio: vertical ratio.
+ */
+struct fimc_scaler {
+	bool	range;
+	bool	bypass;
+	bool	up_h;
+	bool	up_v;
+	u32	hratio;
+	u32	vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * see table 43-1 in the user manual.
+ * @in_hori: scaler input horizontal size.
+ * @bypass: scaler bypass mode.
+ * @dst_h_wo_rot: target horizontal size without output rotation.
+ * @dst_h_rot: target horizontal size with output rotation.
+ * @rl_w_wo_rot: real width without input rotation.
+ * @rl_h_rot: real height without output rotation.
+ */
+struct fimc_capability {
+	/* scaler */
+	u32	in_hori;
+	u32	bypass;
+	/* output rotator */
+	u32	dst_h_wo_rot;
+	u32	dst_h_rot;
+	/* input rotator */
+	u32	rl_w_wo_rot;
+	u32	rl_h_rot;
+};
+
+/*
+ * A structure of fimc driver data.
+ *
+ * @parent_clk: name of parent clock.
+ */
+struct fimc_driverdata {
+	char	*parent_clk;
+};
+
+/*
+ * A structure of fimc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @sclk_fimc_clk: fimc source clock.
+ * @fimc_clk: fimc clock.
+ * @wb_clk: writeback A clock.
+ * @wb_b_clk: writeback B clock.
+ * @sc: scaler information.
+ * @ddata: fimc driver data.
+ * @pol: polarity of writeback.
+ * @id: fimc id.
+ * @irq: irq number.
+ * @suspended: suspended state of the device.
+ */
+struct fimc_context {
+	struct exynos_drm_ippdrv	ippdrv;
+	struct resource	*regs_res;
+	void __iomem	*regs;
+	struct mutex	lock;
+	struct clk	*sclk_fimc_clk;
+	struct clk	*fimc_clk;
+	struct clk	*wb_clk;
+	struct clk	*wb_b_clk;
+	struct fimc_scaler	sc;
+	struct fimc_driverdata	*ddata;
+	struct exynos_drm_ipp_pol	pol;
+	int	id;
+	int	irq;
+	bool	suspended;
+};
+
+static void fimc_sw_reset(struct fimc_context *ctx, bool pattern)
+{
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:pattern[%d]\n", __func__, pattern);
+
+	cfg = fimc_read(EXYNOS_CISRCFMT);
+	cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
+	if (pattern)
+		cfg |= EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR;
+
+	fimc_write(cfg, EXYNOS_CISRCFMT);
+
+	/* s/w reset */
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	cfg |= (EXYNOS_CIGCTRL_SWRST);
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+
+	/* s/w reset complete */
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	cfg &= ~EXYNOS_CIGCTRL_SWRST;
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+
+	/* reset sequence */
+	fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+}
+
+static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
+{
+	u32 camblk_cfg;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	camblk_cfg = readl(SYSREG_CAMERA_BLK);
+	camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK);
+	camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT);
+
+	writel(camblk_cfg, SYSREG_CAMERA_BLK);
+}
+
+static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
+{
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:wb[%d]\n", __func__, wb);
+
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
+		EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
+		EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
+		EXYNOS_CIGCTRL_SELCAM_FIMC_MASK |
+		EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
+		EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
+
+	switch (wb) {
+	case FIMC_WB_A:
+		cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A |
+			EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+		break;
+	case FIMC_WB_B:
+		cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
+			EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+		break;
+	case FIMC_WB_NONE:
+	default:
+		cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
+			EXYNOS_CIGCTRL_SELWRITEBACK_A |
+			EXYNOS_CIGCTRL_SELCAM_MIPI_A |
+			EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
+		break;
+	}
+
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_set_polarity(struct fimc_context *ctx,
+		struct exynos_drm_ipp_pol *pol)
+{
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:inv_pclk[%d]inv_vsync[%d]\n",
+		__func__, pol->inv_pclk, pol->inv_vsync);
+	DRM_DEBUG_KMS("%s:inv_href[%d]inv_hsync[%d]\n",
+		__func__, pol->inv_href, pol->inv_hsync);
+
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
+		 EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
+
+	if (pol->inv_pclk)
+		cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
+	if (pol->inv_vsync)
+		cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
+	if (pol->inv_href)
+		cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
+	if (pol->inv_hsync)
+		cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
+
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
+{
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	if (enable)
+		cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
+	else
+		cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
+
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
+		bool overflow, bool level)
+{
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+			enable, overflow, level);
+
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	if (enable) {
+		cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL);
+		cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
+		if (overflow)
+			cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
+		if (level)
+			cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
+	} else
+		cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE);
+
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_clear_irq(struct fimc_context *ctx)
+{
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static bool fimc_check_ovf(struct fimc_context *ctx)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg, status, flag;
+
+	status = fimc_read(EXYNOS_CISTATUS);
+	flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
+		EXYNOS_CISTATUS_OVFICR;
+
+	DRM_DEBUG_KMS("%s:flag[0x%x]\n", __func__, flag);
+
+	if (status & flag) {
+		cfg = fimc_read(EXYNOS_CIWDOFST);
+		cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+			EXYNOS_CIWDOFST_CLROVFICR);
+
+		fimc_write(cfg, EXYNOS_CIWDOFST);
+
+		cfg = fimc_read(EXYNOS_CIWDOFST);
+		cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+			EXYNOS_CIWDOFST_CLROVFICR);
+
+		fimc_write(cfg, EXYNOS_CIWDOFST);
+
+		dev_err(ippdrv->dev, "overflow occurred at %d, status 0x%x.\n",
+			ctx->id, status);
+		return true;
+	}
+
+	return false;
+}
+
+static bool fimc_check_frame_end(struct fimc_context *ctx)
+{
+	u32 cfg;
+
+	cfg = fimc_read(EXYNOS_CISTATUS);
+
+	DRM_DEBUG_KMS("%s:cfg[0x%x]\n", __func__, cfg);
+
+	if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
+		return false;
+
+	cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
+	fimc_write(cfg, EXYNOS_CISTATUS);
+
+	return true;
+}
+
+static int fimc_get_buf_id(struct fimc_context *ctx)
+{
+	u32 cfg;
+	int frame_cnt, buf_id;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	cfg = fimc_read(EXYNOS_CISTATUS2);
+	frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
+
+	if (frame_cnt == 0)
+		frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
+
+	DRM_DEBUG_KMS("%s:present[%d]before[%d]\n", __func__,
+		EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
+		EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
+
+	if (frame_cnt == 0) {
+		DRM_ERROR("failed to get frame count.\n");
+		return -EIO;
+	}
+
+	buf_id = frame_cnt - 1;
+	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+	return buf_id;
+}
+
+static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
+{
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+	cfg = fimc_read(EXYNOS_CIOCTRL);
+	if (enable)
+		cfg |= EXYNOS_CIOCTRL_LASTENDEN;
+	else
+		cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
+
+	fimc_write(cfg, EXYNOS_CIOCTRL);
+}
+
+static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+	/* RGB */
+	cfg = fimc_read(EXYNOS_CISCCTRL);
+	cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
+
+	switch (fmt) {
+	case DRM_FORMAT_RGB565:
+		cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
+		fimc_write(cfg, EXYNOS_CISCCTRL);
+		return 0;
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_XRGB8888:
+		cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
+		fimc_write(cfg, EXYNOS_CISCCTRL);
+		return 0;
+	default:
+		/* bypass */
+		break;
+	}
+
+	/* YUV */
+	cfg = fimc_read(EXYNOS_MSCTRL);
+	cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
+		EXYNOS_MSCTRL_C_INT_IN_2PLANE |
+		EXYNOS_MSCTRL_ORDER422_YCBYCR);
+
+	switch (fmt) {
+	case DRM_FORMAT_YUYV:
+		cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR;
+		break;
+	case DRM_FORMAT_YVYU:
+		cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB;
+		break;
+	case DRM_FORMAT_UYVY:
+		cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY;
+		break;
+	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_YUV444:
+		cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY;
+		break;
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV61:
+		cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB |
+			EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+		break;
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+		cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
+		break;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV12MT:
+	case DRM_FORMAT_NV16:
+		cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
+			EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+		break;
+	default:
+		dev_err(ippdrv->dev, "invalid source yuv order 0x%x.\n", fmt);
+		return -EINVAL;
+	}
+
+	fimc_write(cfg, EXYNOS_MSCTRL);
+
+	return 0;
+}
+
+static int fimc_src_set_fmt(struct device *dev, u32 fmt)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+	cfg = fimc_read(EXYNOS_MSCTRL);
+	cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
+
+	switch (fmt) {
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_XRGB8888:
+		cfg |= EXYNOS_MSCTRL_INFORMAT_RGB;
+		break;
+	case DRM_FORMAT_YUV444:
+		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+		break;
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE;
+		break;
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_YUV422:
+		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422;
+		break;
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV12MT:
+		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+		break;
+	default:
+		dev_err(ippdrv->dev, "invalid source format 0x%x.\n", fmt);
+		return -EINVAL;
+	}
+
+	fimc_write(cfg, EXYNOS_MSCTRL);
+
+	cfg = fimc_read(EXYNOS_CIDMAPARAM);
+	cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
+
+	if (fmt == DRM_FORMAT_NV12MT)
+		cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
+	else
+		cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
+
+	fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+	return fimc_src_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_src_set_transf(struct device *dev,
+		enum drm_exynos_degree degree,
+		enum drm_exynos_flip flip, bool *swap)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg1, cfg2;
+
+	DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+		degree, flip);
+
+	cfg1 = fimc_read(EXYNOS_MSCTRL);
+	cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
+		EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+
+	cfg2 = fimc_read(EXYNOS_CITRGFMT);
+	cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+
+	switch (degree) {
+	case EXYNOS_DRM_DEGREE_0:
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+		break;
+	case EXYNOS_DRM_DEGREE_90:
+		cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+		break;
+	case EXYNOS_DRM_DEGREE_180:
+		cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+			EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+		break;
+	case EXYNOS_DRM_DEGREE_270:
+		cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+			EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+		cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+		break;
+	default:
+		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+		return -EINVAL;
+	}
+
+	fimc_write(cfg1, EXYNOS_MSCTRL);
+	fimc_write(cfg2, EXYNOS_CITRGFMT);
+	*swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
+
+	return 0;
+}
+
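The degree/flip switch above reduces to a simple rule: a 180-degree rotation is the combination of both mirrors, 90/270 additionally set the input-rotator bit, and each flip toggles one mirror. A stand-alone sketch of that equivalence, with hypothetical values:

#include <stdio.h>

enum { FLIP_V = 1, FLIP_H = 2 };

int main(void)
{
	int degree = 270, flip = FLIP_V;	/* example input */

	int rot90  = (degree == 90 || degree == 270);
	int rot180 = (degree == 180 || degree == 270);

	/* the set-then-clear pattern in fimc_src_set_transf() is an XOR */
	int x_mirror = rot180 ^ !!(flip & FLIP_V);
	int y_mirror = rot180 ^ !!(flip & FLIP_H);

	/* 270 + vertical flip: rot90=1 x_mirror=0 y_mirror=1 */
	printf("rot90=%d x_mirror=%d y_mirror=%d\n",
	       rot90, x_mirror, y_mirror);
	return 0;
}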
+static int fimc_set_window(struct fimc_context *ctx,
+		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+	u32 cfg, h1, h2, v1, v2;
+
+	/* cropped image */
+	h1 = pos->x;
+	h2 = sz->hsize - pos->w - pos->x;
+	v1 = pos->y;
+	v2 = sz->vsize - pos->h - pos->y;
+
+	DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
+	__func__, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
+	DRM_DEBUG_KMS("%s:h1[%d]h2[%d]v1[%d]v2[%d]\n", __func__,
+		h1, h2, v1, v2);
+
+	/*
+	 * set window offset 1, 2 size
+	 * check figure 43-21 in user manual
+	 */
+	cfg = fimc_read(EXYNOS_CIWDOFST);
+	cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
+		EXYNOS_CIWDOFST_WINVEROFST_MASK);
+	cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
+		EXYNOS_CIWDOFST_WINVEROFST(v1));
+	cfg |= EXYNOS_CIWDOFST_WINOFSEN;
+	fimc_write(cfg, EXYNOS_CIWDOFST);
+
+	cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
+		EXYNOS_CIWDOFST2_WINVEROFST2(v2));
+	fimc_write(cfg, EXYNOS_CIWDOFST2);
+
+	return 0;
+}
+
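The four values programmed above are simply the margins of the crop rectangle inside the source frame. A stand-alone sketch of the same arithmetic, with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	unsigned int hsize = 1920, vsize = 1080;	/* source frame */
	unsigned int x = 320, y = 180, w = 1280, h = 720; /* crop rect */

	unsigned int h1 = x;			/* left margin */
	unsigned int h2 = hsize - w - x;	/* right margin  = 320 */
	unsigned int v1 = y;			/* top margin */
	unsigned int v2 = vsize - h - y;	/* bottom margin = 180 */

	printf("h1=%u h2=%u v1=%u v2=%u\n", h1, h2, v1, v2);
	return 0;
}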
+static int fimc_src_set_size(struct device *dev, int swap,
+		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct drm_exynos_pos img_pos = *pos;
+	struct drm_exynos_sz img_sz = *sz;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+		__func__, swap, sz->hsize, sz->vsize);
+
+	/* original size */
+	cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
+		EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
+
+	fimc_write(cfg, EXYNOS_ORGISIZE);
+
+	DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__,
+		pos->x, pos->y, pos->w, pos->h);
+
+	if (swap) {
+		img_pos.w = pos->h;
+		img_pos.h = pos->w;
+		img_sz.hsize = sz->vsize;
+		img_sz.vsize = sz->hsize;
+	}
+
+	/* set input DMA image size */
+	cfg = fimc_read(EXYNOS_CIREAL_ISIZE);
+	cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
+		EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
+	cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
+		EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
+	fimc_write(cfg, EXYNOS_CIREAL_ISIZE);
+
+	/*
+	 * set input FIFO image size
+	 * for now, we support only ITU601 8 bit mode
+	 */
+	cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
+		EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
+		EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
+	fimc_write(cfg, EXYNOS_CISRCFMT);
+
+	/* offset Y(RGB), Cb, Cr */
+	cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
+		EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
+	fimc_write(cfg, EXYNOS_CIIYOFF);
+	cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
+		EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
+	fimc_write(cfg, EXYNOS_CIICBOFF);
+	cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
+		EXYNOS_CIICROFF_VERTICAL(img_pos.y));
+	fimc_write(cfg, EXYNOS_CIICROFF);
+
+	return fimc_set_window(ctx, &img_pos, &img_sz);
+}
+
+static int fimc_src_set_addr(struct device *dev,
+		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+		enum drm_exynos_ipp_buf_type buf_type)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+	struct drm_exynos_ipp_property *property;
+	struct drm_exynos_ipp_config *config;
+
+	if (!c_node) {
+		DRM_ERROR("failed to get c_node.\n");
+		return -EINVAL;
+	}
+
+	property = &c_node->property;
+	if (!property) {
+		DRM_ERROR("failed to get property.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+		property->prop_id, buf_id, buf_type);
+
+	if (buf_id > FIMC_MAX_SRC) {
+		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+		return -ENOMEM;
+	}
+
+	/* address register set */
+	switch (buf_type) {
+	case IPP_BUF_ENQUEUE:
+		config = &property->config[EXYNOS_DRM_OPS_SRC];
+		fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+			EXYNOS_CIIYSA(buf_id));
+
+		if (config->fmt == DRM_FORMAT_YVU420) {
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+				EXYNOS_CIICBSA(buf_id));
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+				EXYNOS_CIICRSA(buf_id));
+		} else {
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+				EXYNOS_CIICBSA(buf_id));
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+				EXYNOS_CIICRSA(buf_id));
+		}
+		break;
+	case IPP_BUF_DEQUEUE:
+		fimc_write(0x0, EXYNOS_CIIYSA(buf_id));
+		fimc_write(0x0, EXYNOS_CIICBSA(buf_id));
+		fimc_write(0x0, EXYNOS_CIICRSA(buf_id));
+		break;
+	default:
+		/* bypass */
+		break;
+	}
+
+	return 0;
+}
+
+static struct exynos_drm_ipp_ops fimc_src_ops = {
+	.set_fmt = fimc_src_set_fmt,
+	.set_transf = fimc_src_set_transf,
+	.set_size = fimc_src_set_size,
+	.set_addr = fimc_src_set_addr,
+};
+
+static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+	/* RGB */
+	cfg = fimc_read(EXYNOS_CISCCTRL);
+	cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
+
+	switch (fmt) {
+	case DRM_FORMAT_RGB565:
+		cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
+		fimc_write(cfg, EXYNOS_CISCCTRL);
+		return 0;
+	case DRM_FORMAT_RGB888:
+		cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
+		fimc_write(cfg, EXYNOS_CISCCTRL);
+		return 0;
+	case DRM_FORMAT_XRGB8888:
+		cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
+			EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
+		fimc_write(cfg, EXYNOS_CISCCTRL);
+		break;
+	default:
+		/* bypass */
+		break;
+	}
+
+	/* YUV */
+	cfg = fimc_read(EXYNOS_CIOCTRL);
+	cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
+		EXYNOS_CIOCTRL_ORDER422_MASK |
+		EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
+
+	switch (fmt) {
+	case DRM_FORMAT_XRGB8888:
+		cfg |= EXYNOS_CIOCTRL_ALPHA_OUT;
+		break;
+	case DRM_FORMAT_YUYV:
+		cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR;
+		break;
+	case DRM_FORMAT_YVYU:
+		cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB;
+		break;
+	case DRM_FORMAT_UYVY:
+		cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY;
+		break;
+	case DRM_FORMAT_VYUY:
+		cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY;
+		break;
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV61:
+		cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB;
+		cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+		break;
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+		cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
+		break;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV12MT:
+	case DRM_FORMAT_NV16:
+		cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
+		cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+		break;
+	default:
+		dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
+		return -EINVAL;
+	}
+
+	fimc_write(cfg, EXYNOS_CIOCTRL);
+
+	return 0;
+}
+
+static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+	cfg = fimc_read(EXYNOS_CIEXTEN);
+
+	if (fmt == DRM_FORMAT_AYUV) {
+		cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
+		fimc_write(cfg, EXYNOS_CIEXTEN);
+	} else {
+		cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
+		fimc_write(cfg, EXYNOS_CIEXTEN);
+
+		cfg = fimc_read(EXYNOS_CITRGFMT);
+		cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
+
+		switch (fmt) {
+		case DRM_FORMAT_RGB565:
+		case DRM_FORMAT_RGB888:
+		case DRM_FORMAT_XRGB8888:
+			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB;
+			break;
+		case DRM_FORMAT_YUYV:
+		case DRM_FORMAT_YVYU:
+		case DRM_FORMAT_UYVY:
+		case DRM_FORMAT_VYUY:
+			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
+			break;
+		case DRM_FORMAT_NV16:
+		case DRM_FORMAT_NV61:
+		case DRM_FORMAT_YUV422:
+			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422;
+			break;
+		case DRM_FORMAT_YUV420:
+		case DRM_FORMAT_YVU420:
+		case DRM_FORMAT_NV12:
+		case DRM_FORMAT_NV12MT:
+		case DRM_FORMAT_NV21:
+			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
+			break;
+		default:
+			dev_err(ippdrv->dev, "invalid target format 0x%x.\n",
+				fmt);
+			return -EINVAL;
+		}
+
+		fimc_write(cfg, EXYNOS_CITRGFMT);
+	}
+
+	cfg = fimc_read(EXYNOS_CIDMAPARAM);
+	cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
+
+	if (fmt == DRM_FORMAT_NV12MT)
+		cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
+	else
+		cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
+
+	fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+	return fimc_dst_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_dst_set_transf(struct device *dev,
+		enum drm_exynos_degree degree,
+		enum drm_exynos_flip flip, bool *swap)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+		degree, flip);
+
+	cfg = fimc_read(EXYNOS_CITRGFMT);
+	cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
+	cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+
+	switch (degree) {
+	case EXYNOS_DRM_DEGREE_0:
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+		break;
+	case EXYNOS_DRM_DEGREE_90:
+		cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+		break;
+	case EXYNOS_DRM_DEGREE_180:
+		cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+			EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+		break;
+	case EXYNOS_DRM_DEGREE_270:
+		cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
+			EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+			EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+		break;
+	default:
+		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+		return -EINVAL;
+	}
+
+	fimc_write(cfg, EXYNOS_CITRGFMT);
+	*swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
+
+	return 0;
+}
+
+static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
+{
+	DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+	if (src >= dst * 64) {
+		DRM_ERROR("failed to make ratio and shift.\n");
+		return -EINVAL;
+	} else if (src >= dst * 32) {
+		*ratio = 32;
+		*shift = 5;
+	} else if (src >= dst * 16) {
+		*ratio = 16;
+		*shift = 4;
+	} else if (src >= dst * 8) {
+		*ratio = 8;
+		*shift = 3;
+	} else if (src >= dst * 4) {
+		*ratio = 4;
+		*shift = 2;
+	} else if (src >= dst * 2) {
+		*ratio = 2;
+		*shift = 1;
+	} else {
+		*ratio = 1;
+		*shift = 0;
+	}
+
+	return 0;
+}
+
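The comparison chain selects the largest power-of-two prescale ratio (at most 32) that does not exceed src/dst, erroring out beyond 1/64. A behaviorally equivalent stand-alone loop, with one worked value:

#include <stdio.h>

/* Same ratio/shift selection as fimc_get_ratio_shift(), loop form. */
static int get_ratio_shift(unsigned int src, unsigned int dst,
			   unsigned int *ratio, unsigned int *shift)
{
	if (src >= dst * 64)
		return -1;	/* more than 1/64: not representable */

	*ratio = 1;
	*shift = 0;
	while (src >= dst * (*ratio * 2)) {
		*ratio *= 2;
		(*shift)++;
	}
	return 0;
}

int main(void)
{
	unsigned int ratio, shift;

	/* 1920 -> 100: 1920 >= 100*16 but < 100*32, so ratio=16, shift=4 */
	if (!get_ratio_shift(1920, 100, &ratio, &shift))
		printf("ratio=%u shift=%u\n", ratio, shift);
	return 0;
}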
+static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
+		struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg, cfg_ext, shfactor;
+	u32 pre_dst_width, pre_dst_height;
+	u32 pre_hratio, hfactor, pre_vratio, vfactor;
+	int ret = 0;
+	u32 src_w, src_h, dst_w, dst_h;
+
+	cfg_ext = fimc_read(EXYNOS_CITRGFMT);
+	if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
+		src_w = src->h;
+		src_h = src->w;
+	} else {
+		src_w = src->w;
+		src_h = src->h;
+	}
+
+	if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) {
+		dst_w = dst->h;
+		dst_h = dst->w;
+	} else {
+		dst_w = dst->w;
+		dst_h = dst->h;
+	}
+
+	ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor);
+	if (ret) {
+		dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+		return ret;
+	}
+
+	ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor);
+	if (ret) {
+		dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+		return ret;
+	}
+
+	pre_dst_width = src_w / pre_hratio;
+	pre_dst_height = src_h / pre_vratio;
+	DRM_DEBUG_KMS("%s:pre_dst_width[%d]pre_dst_height[%d]\n", __func__,
+		pre_dst_width, pre_dst_height);
+	DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
+		__func__, pre_hratio, hfactor, pre_vratio, vfactor);
+
+	sc->hratio = (src_w << 14) / (dst_w << hfactor);
+	sc->vratio = (src_h << 14) / (dst_h << vfactor);
+	sc->up_h = (dst_w >= src_w) ? true : false;
+	sc->up_v = (dst_h >= src_h) ? true : false;
+	DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
+	__func__, sc->hratio, sc->vratio, sc->up_h, sc->up_v);
+
+	shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
+	DRM_DEBUG_KMS("%s:shfactor[%d]\n", __func__, shfactor);
+
+	cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
+		EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
+		EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio));
+	fimc_write(cfg, EXYNOS_CISCPRERATIO);
+
+	cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
+		EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
+	fimc_write(cfg, EXYNOS_CISCPREDST);
+
+	return ret;
+}
+
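The main scaler ratios computed above are 14-bit fixed point: 1 << 14 (16384) means 1:1 scaling. fimc_set_scaler() later splits the value into a main part (ratio >> 6, into CISCCTRL) and an extension part (into CIEXTEN); the low-6-bit split shown below is illustrative, since the driver applies the EXYNOS_CIEXTEN_MAINHORRATIO_EXT() mask itself. A quick worked example with hypothetical sizes:

#include <stdio.h>

int main(void)
{
	/* hypothetical 1920 -> 1280 downscale, prescaler untouched */
	unsigned int src_w = 1920, dst_w = 1280, hfactor = 0;

	/* Q14 fixed point: 16384 == 1.0 */
	unsigned int hratio = (src_w << 14) / (dst_w << hfactor);

	/* 24576 / 16384 = 1.500 -> source is 1.5x the target width */
	printf("hratio=%u (%.3fx)\n", hratio, (double)hratio / 16384.0);

	/* main and extension register parts (illustrative split) */
	printf("main=%u ext=%u\n", hratio >> 6, hratio & 0x3f);
	return 0;
}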
+static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
+{
+	u32 cfg, cfg_ext;
+
+	DRM_DEBUG_KMS("%s:range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
+		__func__, sc->range, sc->bypass, sc->up_h, sc->up_v);
+	DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]\n",
+		__func__, sc->hratio, sc->vratio);
+
+	cfg = fimc_read(EXYNOS_CISCCTRL);
+	cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
+		EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
+		EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
+		EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK |
+		EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+		EXYNOS_CISCCTRL_CSCY2R_WIDE);
+
+	if (sc->range)
+		cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+			EXYNOS_CISCCTRL_CSCY2R_WIDE);
+	if (sc->bypass)
+		cfg |= EXYNOS_CISCCTRL_SCALERBYPASS;
+	if (sc->up_h)
+		cfg |= EXYNOS_CISCCTRL_SCALEUP_H;
+	if (sc->up_v)
+		cfg |= EXYNOS_CISCCTRL_SCALEUP_V;
+
+	cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
+		EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
+	fimc_write(cfg, EXYNOS_CISCCTRL);
+
+	cfg_ext = fimc_read(EXYNOS_CIEXTEN);
+	cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
+	cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
+	cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
+		EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
+	fimc_write(cfg_ext, EXYNOS_CIEXTEN);
+}
+
+static int fimc_dst_set_size(struct device *dev, int swap,
+		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct drm_exynos_pos img_pos = *pos;
+	struct drm_exynos_sz img_sz = *sz;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+		__func__, swap, sz->hsize, sz->vsize);
+
+	/* original size */
+	cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
+		EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
+
+	fimc_write(cfg, EXYNOS_ORGOSIZE);
+
+	DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n",
+		__func__, pos->x, pos->y, pos->w, pos->h);
+
+	/* CSC ITU */
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
+
+	if (sz->hsize >= FIMC_WIDTH_ITU_709)
+		cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
+	else
+		cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
+
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+
+	if (swap) {
+		img_pos.w = pos->h;
+		img_pos.h = pos->w;
+		img_sz.hsize = sz->vsize;
+		img_sz.vsize = sz->hsize;
+	}
+
+	/* target image size */
+	cfg = fimc_read(EXYNOS_CITRGFMT);
+	cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
+		EXYNOS_CITRGFMT_TARGETV_MASK);
+	cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
+		EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
+	fimc_write(cfg, EXYNOS_CITRGFMT);
+
+	/* target area */
+	cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
+	fimc_write(cfg, EXYNOS_CITAREA);
+
+	/* offset Y(RGB), Cb, Cr */
+	cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
+		EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
+	fimc_write(cfg, EXYNOS_CIOYOFF);
+	cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
+		EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
+	fimc_write(cfg, EXYNOS_CIOCBOFF);
+	cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
+		EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
+	fimc_write(cfg, EXYNOS_CIOCROFF);
+
+	return 0;
+}
+
+static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
+{
+	u32 cfg, i, buf_num = 0;
+	u32 mask = 0x00000001;
+
+	cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+	for (i = 0; i < FIMC_REG_SZ; i++)
+		if (cfg & (mask << i))
+			buf_num++;
+
+	DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+	return buf_num;
+}
+
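This is a plain population count over the 32 sequence bits; the kernel's hweight32() would compute the same thing. A stand-alone check:

#include <stdio.h>

/* Count enqueued buffer slots in a CIFCNTSEQ-style bitmask. */
static unsigned int count_buf_seq(unsigned int cfg)
{
	unsigned int i, buf_num = 0;

	for (i = 0; i < 32; i++)
		if (cfg & (1u << i))
			buf_num++;
	return buf_num;
}

int main(void)
{
	unsigned int cfg = 0x0000000b;	/* slots 0, 1 and 3 enqueued */

	printf("buf_num=%u\n", count_buf_seq(cfg));		/* 3 */
	printf("popcount=%d\n", __builtin_popcount(cfg));	/* same */
	return 0;
}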
+static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
+		enum drm_exynos_ipp_buf_type buf_type)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	bool enable;
+	u32 cfg;
+	u32 mask = 0x00000001 << buf_id;
+	int ret = 0;
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+		buf_id, buf_type);
+
+	mutex_lock(&ctx->lock);
+
+	/* mask register set */
+	cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+	switch (buf_type) {
+	case IPP_BUF_ENQUEUE:
+		enable = true;
+		break;
+	case IPP_BUF_DEQUEUE:
+		enable = false;
+		break;
+	default:
+		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+		ret = -EINVAL;
+		goto err_unlock;
+	}
+
+	/* sequence id */
+	cfg &= (~mask);
+	cfg |= (enable << buf_id);
+	fimc_write(cfg, EXYNOS_CIFCNTSEQ);
+
+	/* interrupt enable */
+	if (buf_type == IPP_BUF_ENQUEUE &&
+	    fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START)
+		fimc_handle_irq(ctx, true, false, true);
+
+	/* interrupt disable */
+	if (buf_type == IPP_BUF_DEQUEUE &&
+	    fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
+		fimc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+	mutex_unlock(&ctx->lock);
+	return ret;
+}
+
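The two thresholds switch the frame interrupt on once at least FIMC_BUF_START (2) buffers are enqueued and off again when the queue drains to FIMC_BUF_STOP (1). A condensed model of the enqueue/dequeue checks above (the driver only tests the start threshold on enqueue and the stop threshold on dequeue; this merges both for illustration):

#include <stdio.h>

#define FIMC_BUF_STOP	1
#define FIMC_BUF_START	2

static int update_irq(unsigned int queued, int irq_on)
{
	if (queued >= FIMC_BUF_START)
		return 1;	/* enough buffers queued: irq on */
	if (queued <= FIMC_BUF_STOP)
		return 0;	/* queue nearly drained: irq off */
	return irq_on;
}

int main(void)
{
	int irq_on = 0;

	irq_on = update_irq(1, irq_on);	/* enqueue #1 -> still off */
	irq_on = update_irq(2, irq_on);	/* enqueue #2 -> on */
	printf("after two enqueues: irq_on=%d\n", irq_on);
	irq_on = update_irq(1, irq_on);	/* dequeue    -> off */
	printf("after one dequeue:  irq_on=%d\n", irq_on);
	return 0;
}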
+static int fimc_dst_set_addr(struct device *dev,
+		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+		enum drm_exynos_ipp_buf_type buf_type)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+	struct drm_exynos_ipp_property *property;
+	struct drm_exynos_ipp_config *config;
+
+	if (!c_node) {
+		DRM_ERROR("failed to get c_node.\n");
+		return -EINVAL;
+	}
+
+	property = &c_node->property;
+	if (!property) {
+		DRM_ERROR("failed to get property.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+		property->prop_id, buf_id, buf_type);
+
+	if (buf_id > FIMC_MAX_DST) {
+		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+		return -ENOMEM;
+	}
+
+	/* address register set */
+	switch (buf_type) {
+	case IPP_BUF_ENQUEUE:
+		config = &property->config[EXYNOS_DRM_OPS_DST];
+
+		fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+			EXYNOS_CIOYSA(buf_id));
+
+		if (config->fmt == DRM_FORMAT_YVU420) {
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+				EXYNOS_CIOCBSA(buf_id));
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+				EXYNOS_CIOCRSA(buf_id));
+		} else {
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+				EXYNOS_CIOCBSA(buf_id));
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+				EXYNOS_CIOCRSA(buf_id));
+		}
+		break;
+	case IPP_BUF_DEQUEUE:
+		fimc_write(0x0, EXYNOS_CIOYSA(buf_id));
+		fimc_write(0x0, EXYNOS_CIOCBSA(buf_id));
+		fimc_write(0x0, EXYNOS_CIOCRSA(buf_id));
+		break;
+	default:
+		/* bypass */
+		break;
+	}
+
+	return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops fimc_dst_ops = {
+	.set_fmt = fimc_dst_set_fmt,
+	.set_transf = fimc_dst_set_transf,
+	.set_size = fimc_dst_set_size,
+	.set_addr = fimc_dst_set_addr,
+};
+
+static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
+{
+	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+	if (enable) {
+		clk_enable(ctx->sclk_fimc_clk);
+		clk_enable(ctx->fimc_clk);
+		clk_enable(ctx->wb_clk);
+		ctx->suspended = false;
+	} else {
+		clk_disable(ctx->sclk_fimc_clk);
+		clk_disable(ctx->fimc_clk);
+		clk_disable(ctx->wb_clk);
+		ctx->suspended = true;
+	}
+
+	return 0;
+}
+
+static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
+{
+	struct fimc_context *ctx = dev_id;
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+	struct drm_exynos_ipp_event_work *event_work =
+		c_node->event_work;
+	int buf_id;
+
+	DRM_DEBUG_KMS("%s:fimc id[%d]\n", __func__, ctx->id);
+
+	fimc_clear_irq(ctx);
+	if (fimc_check_ovf(ctx))
+		return IRQ_NONE;
+
+	if (!fimc_check_frame_end(ctx))
+		return IRQ_NONE;
+
+	buf_id = fimc_get_buf_id(ctx);
+	if (buf_id < 0)
+		return IRQ_HANDLED;
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+	if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
+		DRM_ERROR("failed to dequeue.\n");
+		return IRQ_HANDLED;
+	}
+
+	event_work->ippdrv = ippdrv;
+	event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+	queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
+
+	return IRQ_HANDLED;
+}
+
+static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+	struct drm_exynos_ipp_prop_list *prop_list;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+	if (!prop_list) {
+		DRM_ERROR("failed to alloc property list.\n");
+		return -ENOMEM;
+	}
+
+	prop_list->version = 1;
+	prop_list->writeback = 1;
+	prop_list->refresh_min = FIMC_REFRESH_MIN;
+	prop_list->refresh_max = FIMC_REFRESH_MAX;
+	prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) |
+				(1 << EXYNOS_DRM_FLIP_VERTICAL) |
+				(1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+	prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+				(1 << EXYNOS_DRM_DEGREE_90) |
+				(1 << EXYNOS_DRM_DEGREE_180) |
+				(1 << EXYNOS_DRM_DEGREE_270);
+	prop_list->csc = 1;
+	prop_list->crop = 1;
+	prop_list->crop_max.hsize = FIMC_CROP_MAX;
+	prop_list->crop_max.vsize = FIMC_CROP_MAX;
+	prop_list->crop_min.hsize = FIMC_CROP_MIN;
+	prop_list->crop_min.vsize = FIMC_CROP_MIN;
+	prop_list->scale = 1;
+	prop_list->scale_max.hsize = FIMC_SCALE_MAX;
+	prop_list->scale_max.vsize = FIMC_SCALE_MAX;
+	prop_list->scale_min.hsize = FIMC_SCALE_MIN;
+	prop_list->scale_min.vsize = FIMC_SCALE_MIN;
+
+	ippdrv->prop_list = prop_list;
+
+	return 0;
+}
+
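The flip and degree fields are capability bitmasks, so a later check_property pass can test support with a single AND. A hypothetical check, mirroring how the masks above are built:

	/* would 90-degree rotation be accepted by this driver? */
	if (prop_list->degree & (1 << EXYNOS_DRM_DEGREE_90))
		/* supported */;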
+static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
+{
+	switch (flip) {
+	case EXYNOS_DRM_FLIP_NONE:
+	case EXYNOS_DRM_FLIP_VERTICAL:
+	case EXYNOS_DRM_FLIP_HORIZONTAL:
+		return true;
+	default:
+		DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+		return false;
+	}
+}
+
+static int fimc_ippdrv_check_property(struct device *dev,
+		struct drm_exynos_ipp_property *property)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+	struct drm_exynos_ipp_config *config;
+	struct drm_exynos_pos *pos;
+	struct drm_exynos_sz *sz;
+	bool swap;
+	int i;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	for_each_ipp_ops(i) {
+		if ((i == EXYNOS_DRM_OPS_SRC) &&
+			(property->cmd == IPP_CMD_WB))
+			continue;
+
+		config = &property->config[i];
+		pos = &config->pos;
+		sz = &config->sz;
+
+		/* check for flip */
+		if (!fimc_check_drm_flip(config->flip)) {
+			DRM_ERROR("invalid flip.\n");
+			goto err_property;
+		}
+
+		/* check for degree */
+		switch (config->degree) {
+		case EXYNOS_DRM_DEGREE_90:
+		case EXYNOS_DRM_DEGREE_270:
+			swap = true;
+			break;
+		case EXYNOS_DRM_DEGREE_0:
+		case EXYNOS_DRM_DEGREE_180:
+			swap = false;
+			break;
+		default:
+			DRM_ERROR("invalid degree.\n");
+			goto err_property;
+		}
+
+		/* check for buffer bound */
+		if ((pos->x + pos->w > sz->hsize) ||
+			(pos->y + pos->h > sz->vsize)) {
+			DRM_ERROR("out of buf bound.\n");
+			goto err_property;
+		}
+
+		/* check for crop */
+		if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+			if (swap) {
+				if ((pos->h < pp->crop_min.hsize) ||
+					(sz->vsize > pp->crop_max.hsize) ||
+					(pos->w < pp->crop_min.vsize) ||
+					(sz->hsize > pp->crop_max.vsize)) {
+					DRM_ERROR("out of crop size.\n");
+					goto err_property;
+				}
+			} else {
+				if ((pos->w < pp->crop_min.hsize) ||
+					(sz->hsize > pp->crop_max.hsize) ||
+					(pos->h < pp->crop_min.vsize) ||
+					(sz->vsize > pp->crop_max.vsize)) {
+					DRM_ERROR("out of crop size.\n");
+					goto err_property;
+				}
+			}
+		}
+
+		/* check for scale */
+		if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+			if (swap) {
+				if ((pos->h < pp->scale_min.hsize) ||
+					(sz->vsize > pp->scale_max.hsize) ||
+					(pos->w < pp->scale_min.vsize) ||
+					(sz->hsize > pp->scale_max.vsize)) {
+					DRM_ERROR("out of scale size.\n");
+					goto err_property;
+				}
+			} else {
+				if ((pos->w < pp->scale_min.hsize) ||
+					(sz->hsize > pp->scale_max.hsize) ||
+					(pos->h < pp->scale_min.vsize) ||
+					(sz->vsize > pp->scale_max.vsize)) {
+					DRM_ERROR("out of scale size.\n");
+					goto err_property;
+				}
+			}
+		}
+	}
+
+	return 0;
+
+err_property:
+	for_each_ipp_ops(i) {
+		if ((i == EXYNOS_DRM_OPS_SRC) &&
+			(property->cmd == IPP_CMD_WB))
+			continue;
+
+		config = &property->config[i];
+		pos = &config->pos;
+		sz = &config->sz;
+
+		DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+			i ? "dst" : "src", config->flip, config->degree,
+			pos->x, pos->y, pos->w, pos->h,
+			sz->hsize, sz->vsize);
+	}
+
+	return -EINVAL;
+}
+
+static void fimc_clear_addr(struct fimc_context *ctx)
+{
+	int i;
+
+	DRM_DEBUG_KMS("%s:\n", __func__);
+
+	for (i = 0; i < FIMC_MAX_SRC; i++) {
+		fimc_write(0, EXYNOS_CIIYSA(i));
+		fimc_write(0, EXYNOS_CIICBSA(i));
+		fimc_write(0, EXYNOS_CIICRSA(i));
+	}
+
+	for (i = 0; i < FIMC_MAX_DST; i++) {
+		fimc_write(0, EXYNOS_CIOYSA(i));
+		fimc_write(0, EXYNOS_CIOCBSA(i));
+		fimc_write(0, EXYNOS_CIOCRSA(i));
+	}
+}
+
+static int fimc_ippdrv_reset(struct device *dev)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* reset h/w block */
+	fimc_sw_reset(ctx, false);
+
+	/* reset scaler capability */
+	memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+
+	fimc_clear_addr(ctx);
+
+	return 0;
+}
+
+static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+	struct drm_exynos_ipp_property *property;
+	struct drm_exynos_ipp_config *config;
+	struct drm_exynos_pos	img_pos[EXYNOS_DRM_OPS_MAX];
+	struct drm_exynos_ipp_set_wb set_wb;
+	int ret, i;
+	u32 cfg0, cfg1;
+
+	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+	if (!c_node) {
+		DRM_ERROR("failed to get c_node.\n");
+		return -EINVAL;
+	}
+
+	property = &c_node->property;
+	if (!property) {
+		DRM_ERROR("failed to get property.\n");
+		return -EINVAL;
+	}
+
+	fimc_handle_irq(ctx, true, false, true);
+
+	for_each_ipp_ops(i) {
+		config = &property->config[i];
+		img_pos[i] = config->pos;
+	}
+
+	ret = fimc_set_prescaler(ctx, &ctx->sc,
+		&img_pos[EXYNOS_DRM_OPS_SRC],
+		&img_pos[EXYNOS_DRM_OPS_DST]);
+	if (ret) {
+		dev_err(dev, "failed to set prescaler.\n");
+		return ret;
+	}
+
+	/* If set to true, the screen can be captured as jpeg data */
+	fimc_handle_jpeg(ctx, false);
+	fimc_set_scaler(ctx, &ctx->sc);
+	fimc_set_polarity(ctx, &ctx->pol);
+
+	switch (cmd) {
+	case IPP_CMD_M2M:
+		fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
+		fimc_handle_lastend(ctx, false);
+
+		/* setup dma */
+		cfg0 = fimc_read(EXYNOS_MSCTRL);
+		cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
+		cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
+		fimc_write(cfg0, EXYNOS_MSCTRL);
+		break;
+	case IPP_CMD_WB:
+		fimc_set_type_ctrl(ctx, FIMC_WB_A);
+		fimc_handle_lastend(ctx, true);
+
+		/* setup FIMD */
+		fimc_set_camblk_fimd0_wb(ctx);
+
+		set_wb.enable = 1;
+		set_wb.refresh = property->refresh_rate;
+		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+		break;
+	case IPP_CMD_OUTPUT:
+	default:
+		ret = -EINVAL;
+		dev_err(dev, "invalid operations.\n");
+		return ret;
+	}
+
+	/* Reset status */
+	fimc_write(0x0, EXYNOS_CISTATUS);
+
+	cfg0 = fimc_read(EXYNOS_CIIMGCPT);
+	cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+	cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+
+	/* Scaler */
+	cfg1 = fimc_read(EXYNOS_CISCCTRL);
+	cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
+	cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
+		EXYNOS_CISCCTRL_SCALERSTART);
+
+	fimc_write(cfg1, EXYNOS_CISCCTRL);
+
+	/* Enable image capture */
+	cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
+	fimc_write(cfg0, EXYNOS_CIIMGCPT);
+
+	/* Enable frame end irq (clear the disable bit) */
+	cfg0 = fimc_read(EXYNOS_CIGCTRL);
+	cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+	fimc_write(cfg0, EXYNOS_CIGCTRL);
+
+	cfg0 = fimc_read(EXYNOS_CIOCTRL);
+	cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
+	fimc_write(cfg0, EXYNOS_CIOCTRL);
+
+	if (cmd == IPP_CMD_M2M) {
+		cfg0 = fimc_read(EXYNOS_MSCTRL);
+		cfg0 |= EXYNOS_MSCTRL_ENVID;
+		fimc_write(cfg0, EXYNOS_MSCTRL);
+
+		cfg0 = fimc_read(EXYNOS_MSCTRL);
+		cfg0 |= EXYNOS_MSCTRL_ENVID;
+		fimc_write(cfg0, EXYNOS_MSCTRL);
+	}
+
+	return 0;
+}
+
+static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+	switch (cmd) {
+	case IPP_CMD_M2M:
+		/* Source clear */
+		cfg = fimc_read(EXYNOS_MSCTRL);
+		cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
+		cfg &= ~EXYNOS_MSCTRL_ENVID;
+		fimc_write(cfg, EXYNOS_MSCTRL);
+		break;
+	case IPP_CMD_WB:
+		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+		break;
+	case IPP_CMD_OUTPUT:
+	default:
+		dev_err(dev, "invalid operations.\n");
+		break;
+	}
+
+	fimc_handle_irq(ctx, false, false, true);
+
+	/* reset sequence */
+	fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+
+	/* Scaler disable */
+	cfg = fimc_read(EXYNOS_CISCCTRL);
+	cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
+	fimc_write(cfg, EXYNOS_CISCCTRL);
+
+	/* Disable image capture */
+	cfg = fimc_read(EXYNOS_CIIMGCPT);
+	cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
+	fimc_write(cfg, EXYNOS_CIIMGCPT);
+
+	/* Disable frame end irq by setting the disable bit */
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static int __devinit fimc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct fimc_context *ctx;
+	struct clk	*parent_clk;
+	struct resource *res;
+	struct exynos_drm_ippdrv *ippdrv;
+	struct exynos_drm_fimc_pdata *pdata;
+	struct fimc_driverdata *ddata;
+	int ret;
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(dev, "no platform data specified.\n");
+		return -EINVAL;
+	}
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ddata = (struct fimc_driverdata *)
+		platform_get_device_id(pdev)->driver_data;
+
+	/* clock control */
+	ctx->sclk_fimc_clk = clk_get(dev, "sclk_fimc");
+	if (IS_ERR(ctx->sclk_fimc_clk)) {
+		dev_err(dev, "failed to get src fimc clock.\n");
+		ret = PTR_ERR(ctx->sclk_fimc_clk);
+		goto err_ctx;
+	}
+	clk_enable(ctx->sclk_fimc_clk);
+
+	ctx->fimc_clk = clk_get(dev, "fimc");
+	if (IS_ERR(ctx->fimc_clk)) {
+		dev_err(dev, "failed to get fimc clock.\n");
+		ret = PTR_ERR(ctx->fimc_clk);
+		clk_disable(ctx->sclk_fimc_clk);
+		clk_put(ctx->sclk_fimc_clk);
+		goto err_ctx;
+	}
+
+	ctx->wb_clk = clk_get(dev, "pxl_async0");
+	if (IS_ERR(ctx->wb_clk)) {
+		dev_err(dev, "failed to get writeback a clock.\n");
+		ret = PTR_ERR(ctx->wb_clk);
+		clk_disable(ctx->sclk_fimc_clk);
+		clk_put(ctx->sclk_fimc_clk);
+		clk_put(ctx->fimc_clk);
+		goto err_ctx;
+	}
+
+	ctx->wb_b_clk = clk_get(dev, "pxl_async1");
+	if (IS_ERR(ctx->wb_b_clk)) {
+		dev_err(dev, "failed to get writeback b clock.\n");
+		ret = PTR_ERR(ctx->wb_b_clk);
+		clk_disable(ctx->sclk_fimc_clk);
+		clk_put(ctx->sclk_fimc_clk);
+		clk_put(ctx->fimc_clk);
+		clk_put(ctx->wb_clk);
+		goto err_ctx;
+	}
+
+	parent_clk = clk_get(dev, ddata->parent_clk);
+
+	if (IS_ERR(parent_clk)) {
+		dev_err(dev, "failed to get parent clock.\n");
+		ret = PTR_ERR(parent_clk);
+		clk_disable(ctx->sclk_fimc_clk);
+		clk_put(ctx->sclk_fimc_clk);
+		clk_put(ctx->fimc_clk);
+		clk_put(ctx->wb_clk);
+		clk_put(ctx->wb_b_clk);
+		goto err_ctx;
+	}
+
+	if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) {
+		dev_err(dev, "failed to set parent.\n");
+		ret = -EINVAL;
+		clk_put(parent_clk);
+		clk_disable(ctx->sclk_fimc_clk);
+		clk_put(ctx->sclk_fimc_clk);
+		clk_put(ctx->fimc_clk);
+		clk_put(ctx->wb_clk);
+		clk_put(ctx->wb_b_clk);
+		goto err_ctx;
+	}
+
+	clk_put(parent_clk);
+	clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);
+
+	/* resource memory */
+	ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!ctx->regs_res) {
+		dev_err(dev, "failed to find registers.\n");
+		ret = -ENOENT;
+		goto err_clk;
+	}
+
+	ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
+	if (!ctx->regs) {
+		dev_err(dev, "failed to map registers.\n");
+		ret = -ENXIO;
+		goto err_clk;
+	}
+
+	/* resource irq */
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(dev, "failed to request irq resource.\n");
+		ret = -ENOENT;
+		goto err_get_regs;
+	}
+
+	ctx->irq = res->start;
+	ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler,
+		IRQF_ONESHOT, "drm_fimc", ctx);
+	if (ret < 0) {
+		dev_err(dev, "failed to request irq.\n");
+		goto err_get_regs;
+	}
+
+	/* context initialization */
+	ctx->id = pdev->id;
+	ctx->pol = pdata->pol;
+	ctx->ddata = ddata;
+
+	ippdrv = &ctx->ippdrv;
+	ippdrv->dev = dev;
+	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
+	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
+	ippdrv->check_property = fimc_ippdrv_check_property;
+	ippdrv->reset = fimc_ippdrv_reset;
+	ippdrv->start = fimc_ippdrv_start;
+	ippdrv->stop = fimc_ippdrv_stop;
+	ret = fimc_init_prop_list(ippdrv);
+	if (ret < 0) {
+		dev_err(dev, "failed to init property list.\n");
+		goto err_get_irq;
+	}
+
+	DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+		(int)ippdrv);
+
+	mutex_init(&ctx->lock);
+	platform_set_drvdata(pdev, ctx);
+
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	ret = exynos_drm_ippdrv_register(ippdrv);
+	if (ret < 0) {
+		dev_err(dev, "failed to register drm fimc device.\n");
+		goto err_ippdrv_register;
+	}
+
+	dev_info(&pdev->dev, "drm fimc registered successfully.\n");
+
+	return 0;
+
+err_ippdrv_register:
+	devm_kfree(dev, ippdrv->prop_list);
+	pm_runtime_disable(dev);
+err_get_irq:
+	free_irq(ctx->irq, ctx);
+err_get_regs:
+	devm_iounmap(dev, ctx->regs);
+err_clk:
+	clk_put(ctx->sclk_fimc_clk);
+	clk_put(ctx->fimc_clk);
+	clk_put(ctx->wb_clk);
+	clk_put(ctx->wb_b_clk);
+err_ctx:
+	devm_kfree(dev, ctx);
+	return ret;
+}
+
+static int __devexit fimc_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+	devm_kfree(dev, ippdrv->prop_list);
+	exynos_drm_ippdrv_unregister(ippdrv);
+	mutex_destroy(&ctx->lock);
+
+	pm_runtime_set_suspended(dev);
+	pm_runtime_disable(dev);
+
+	free_irq(ctx->irq, ctx);
+	devm_iounmap(dev, ctx->regs);
+
+	clk_put(ctx->sclk_fimc_clk);
+	clk_put(ctx->fimc_clk);
+	clk_put(ctx->wb_clk);
+	clk_put(ctx->wb_b_clk);
+
+	devm_kfree(dev, ctx);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_suspend(struct device *dev)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_resume(struct device *dev)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+	if (!pm_runtime_suspended(dev))
+		return fimc_clk_ctrl(ctx, true);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimc_runtime_suspend(struct device *dev)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+	return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_runtime_resume(struct device *dev)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+	return fimc_clk_ctrl(ctx, true);
+}
+#endif
+
+static struct fimc_driverdata exynos4210_fimc_data = {
+	.parent_clk = "mout_mpll",
+};
+
+static struct fimc_driverdata exynos4412_fimc_data = {
+	.parent_clk = "mout_mpll_user",
+};
+
+static struct platform_device_id fimc_driver_ids[] = {
+	{
+		.name		= "exynos4210-fimc",
+		.driver_data	= (unsigned long)&exynos4210_fimc_data,
+	}, {
+		.name		= "exynos4412-fimc",
+		.driver_data	= (unsigned long)&exynos4412_fimc_data,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
+
+static const struct dev_pm_ops fimc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+	SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
+};
+
+struct platform_driver fimc_driver = {
+	.probe		= fimc_probe,
+	.remove		= __devexit_p(fimc_remove),
+	.id_table	= fimc_driver_ids,
+	.driver		= {
+		.name	= "exynos-drm-fimc",
+		.owner	= THIS_MODULE,
+		.pm	= &fimc_pm_ops,
+	},
+};
+
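
Throughout fimc_ippdrv_start() and fimc_ippdrv_stop() above, hardware state is changed exclusively through read-modify-write sequences on memory-mapped registers. A minimal sketch of that idiom, using generic readl()/writel() in place of the driver's fimc_read()/fimc_write() wrappers (the helper name and its parameters are illustrative, not part of the patch):

	/*
	 * Sketch only: read-modify-write register update in the style of
	 * fimc_ippdrv_start()/fimc_ippdrv_stop() above. "regs", "reg" and
	 * "mask" are placeholders, not the driver's actual symbols.
	 */
	static void fimc_update_bits(void __iomem *regs, u32 reg,
				     u32 mask, u32 val)
	{
		u32 cfg;

		cfg = readl(regs + reg);	/* fetch current register value */
		cfg &= ~mask;			/* clear the field being updated */
		cfg |= val & mask;		/* merge in the new field value */
		writel(cfg, regs + reg);	/* commit the result */
	}

Keeping every update in this form makes the enable/disable pairs in the start and stop paths easy to audit against the register documentation.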
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
new file mode 100644
index 000000000000..dc970fa0d888
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *	Eunchul Kim <chulspro.kim@samsung.com>
+ *	Jinyoung Jeon <jy0.jeon@samsung.com>
+ *	Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_FIMC_H_
+#define _EXYNOS_DRM_FIMC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_FIMC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index e08478f19f1a..bf0d9baca2bc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/of_device.h>
 #include <linux/pm_runtime.h>
 
 #include <video/samsung_fimd.h>
@@ -25,6 +26,7 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fbdev.h"
 #include "exynos_drm_crtc.h"
+#include "exynos_drm_iommu.h"
 
 /*
  * FIMD is stand for Fully Interactive Mobile Display and
@@ -78,10 +80,10 @@ struct fimd_win_data {
 	unsigned int		fb_height;
 	unsigned int		bpp;
 	dma_addr_t		dma_addr;
-	void __iomem		*vaddr;
 	unsigned int		buf_offsize;
 	unsigned int		line_size;	/* bytes */
 	bool			enabled;
+	bool			resume;
 };
 
 struct fimd_context {
@@ -99,13 +101,34 @@ struct fimd_context {
 	u32				vidcon1;
 	bool				suspended;
 	struct mutex			lock;
+	wait_queue_head_t		wait_vsync_queue;
+	atomic_t			wait_vsync_event;
 
 	struct exynos_drm_panel_info *panel;
 };
 
+#ifdef CONFIG_OF
+static const struct of_device_id fimd_driver_dt_match[] = {
+	{ .compatible = "samsung,exynos4-fimd",
+	  .data = &exynos4_fimd_driver_data },
+	{ .compatible = "samsung,exynos5-fimd",
+	  .data = &exynos5_fimd_driver_data },
+	{},
+};
+MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
+#endif
+
 static inline struct fimd_driver_data *drm_fimd_get_driver_data(
 	struct platform_device *pdev)
 {
+#ifdef CONFIG_OF
+	const struct of_device_id *of_id =
+			of_match_device(fimd_driver_dt_match, &pdev->dev);
+
+	if (of_id)
+		return (struct fimd_driver_data *)of_id->data;
+#endif
+
 	return (struct fimd_driver_data *)
 		platform_get_device_id(pdev)->driver_data;
 }
@@ -240,7 +263,9 @@ static void fimd_commit(struct device *dev)
 
 	/* setup horizontal and vertical display size. */
 	val = VIDTCON2_LINEVAL(timing->yres - 1) |
-	       VIDTCON2_HOZVAL(timing->xres - 1);
+	       VIDTCON2_HOZVAL(timing->xres - 1) |
+	       VIDTCON2_LINEVAL_E(timing->yres - 1) |
+	       VIDTCON2_HOZVAL_E(timing->xres - 1);
 	writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
 
 	/* setup clock source, clock divider, enable dma. */
@@ -307,12 +332,32 @@ static void fimd_disable_vblank(struct device *dev)
 	}
 }
 
+static void fimd_wait_for_vblank(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+
+	if (ctx->suspended)
+		return;
+
+	atomic_set(&ctx->wait_vsync_event, 1);
+
+	/*
+	 * wait for FIMD to signal the VSYNC interrupt, or return after
+	 * a timeout of 50ms (one period at a 20Hz refresh rate).
+	 */
+	if (!wait_event_timeout(ctx->wait_vsync_queue,
+				!atomic_read(&ctx->wait_vsync_event),
+				DRM_HZ/20))
+		DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
 static struct exynos_drm_manager_ops fimd_manager_ops = {
 	.dpms = fimd_dpms,
 	.apply = fimd_apply,
 	.commit = fimd_commit,
 	.enable_vblank = fimd_enable_vblank,
 	.disable_vblank = fimd_disable_vblank,
+	.wait_for_vblank = fimd_wait_for_vblank,
 };
 
 static void fimd_win_mode_set(struct device *dev,
@@ -351,7 +396,6 @@ static void fimd_win_mode_set(struct device *dev,
 	win_data->fb_width = overlay->fb_width;
 	win_data->fb_height = overlay->fb_height;
 	win_data->dma_addr = overlay->dma_addr[0] + offset;
-	win_data->vaddr = overlay->vaddr[0] + offset;
 	win_data->bpp = overlay->bpp;
 	win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
 				(overlay->bpp >> 3);
@@ -361,9 +405,7 @@ static void fimd_win_mode_set(struct device *dev,
 			win_data->offset_x, win_data->offset_y);
 	DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
 			win_data->ovl_width, win_data->ovl_height);
-	DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
-			(unsigned long)win_data->dma_addr,
-			(unsigned long)win_data->vaddr);
+	DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
 	DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
 			overlay->fb_width, overlay->crtc_width);
 }
@@ -451,6 +493,8 @@ static void fimd_win_commit(struct device *dev, int zpos)
 	struct fimd_win_data *win_data;
 	int win = zpos;
 	unsigned long val, alpha, size;
+	unsigned int last_x;
+	unsigned int last_y;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -496,24 +540,32 @@ static void fimd_win_commit(struct device *dev, int zpos)
 
 	/* buffer size */
 	val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
-		VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size);
+		VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size) |
+		VIDW_BUF_SIZE_OFFSET_E(win_data->buf_offsize) |
+		VIDW_BUF_SIZE_PAGEWIDTH_E(win_data->line_size);
 	writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
 
 	/* OSD position */
 	val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
-		VIDOSDxA_TOPLEFT_Y(win_data->offset_y);
+		VIDOSDxA_TOPLEFT_Y(win_data->offset_y) |
+		VIDOSDxA_TOPLEFT_X_E(win_data->offset_x) |
+		VIDOSDxA_TOPLEFT_Y_E(win_data->offset_y);
 	writel(val, ctx->regs + VIDOSD_A(win));
 
-	val = VIDOSDxB_BOTRIGHT_X(win_data->offset_x +
-					win_data->ovl_width - 1) |
-		VIDOSDxB_BOTRIGHT_Y(win_data->offset_y +
-					win_data->ovl_height - 1);
+	last_x = win_data->offset_x + win_data->ovl_width;
+	if (last_x)
+		last_x--;
+	last_y = win_data->offset_y + win_data->ovl_height;
+	if (last_y)
+		last_y--;
+
+	val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y) |
+		VIDOSDxB_BOTRIGHT_X_E(last_x) | VIDOSDxB_BOTRIGHT_Y_E(last_y);
+
 	writel(val, ctx->regs + VIDOSD_B(win));
 
 	DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
-			win_data->offset_x, win_data->offset_y,
-			win_data->offset_x + win_data->ovl_width - 1,
-			win_data->offset_y + win_data->ovl_height - 1);
+			win_data->offset_x, win_data->offset_y, last_x, last_y);
 
 	/* hardware window 0 doesn't support alpha channel. */
 	if (win != 0) {
@@ -573,6 +625,12 @@ static void fimd_win_disable(struct device *dev, int zpos)
 
 	win_data = &ctx->win_data[win];
 
+	if (ctx->suspended) {
+		/* do not resume this window*/
+		win_data->resume = false;
+		return;
+	}
+
 	/* protect windows */
 	val = readl(ctx->regs + SHADOWCON);
 	val |= SHADOWCON_WINx_PROTECT(win);
@@ -592,22 +650,10 @@ static void fimd_win_disable(struct device *dev, int zpos)
 	win_data->enabled = false;
 }
 
-static void fimd_wait_for_vblank(struct device *dev)
-{
-	struct fimd_context *ctx = get_fimd_context(dev);
-	int ret;
-
-	ret = wait_for((__raw_readl(ctx->regs + VIDCON1) &
-					VIDCON1_VSTATUS_VSYNC), 50);
-	if (ret < 0)
-		DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
 static struct exynos_drm_overlay_ops fimd_overlay_ops = {
 	.mode_set = fimd_win_mode_set,
 	.commit = fimd_win_commit,
 	.disable = fimd_win_disable,
-	.wait_for_vblank = fimd_wait_for_vblank,
 };
 
 static struct exynos_drm_manager fimd_manager = {
@@ -623,7 +669,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
 	struct drm_pending_vblank_event *e, *t;
 	struct timeval now;
 	unsigned long flags;
-	bool is_checked = false;
 
 	spin_lock_irqsave(&drm_dev->event_lock, flags);
 
@@ -633,8 +678,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
 		if (crtc != e->pipe)
 			continue;
 
-		is_checked = true;
-
 		do_gettimeofday(&now);
 		e->event.sequence = 0;
 		e->event.tv_sec = now.tv_sec;
@@ -642,22 +685,7 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
 
 		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
 		wake_up_interruptible(&e->base.file_priv->event_wait);
-	}
-
-	if (is_checked) {
-		/*
-		 * call drm_vblank_put only in case that drm_vblank_get was
-		 * called.
-		 */
-		if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
-			drm_vblank_put(drm_dev, crtc);
-
-		/*
-		 * don't off vblank if vblank_disable_allowed is 1,
-		 * because vblank would be off by timer handler.
-		 */
-		if (!drm_dev->vblank_disable_allowed)
-			drm_vblank_off(drm_dev, crtc);
+		drm_vblank_put(drm_dev, crtc);
 	}
 
 	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -684,6 +712,11 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
 	drm_handle_vblank(drm_dev, manager->pipe);
 	fimd_finish_pageflip(drm_dev, manager->pipe);
 
+	/* set wait vsync event to zero and wake up queue. */
+	if (atomic_read(&ctx->wait_vsync_event)) {
+		atomic_set(&ctx->wait_vsync_event, 0);
+		DRM_WAKEUP(&ctx->wait_vsync_queue);
+	}
 out:
 	return IRQ_HANDLED;
 }
@@ -709,6 +742,10 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
 	 */
 	drm_dev->vblank_disable_allowed = 1;
 
+	/* attach this sub driver to iommu mapping if supported. */
+	if (is_drm_iommu_supported(drm_dev))
+		drm_iommu_attach_device(drm_dev, dev);
+
 	return 0;
 }
 
@@ -716,7 +753,9 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
 {
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	/* TODO. */
+	/* detach this sub driver from iommu mapping if supported. */
+	if (is_drm_iommu_supported(drm_dev))
+		drm_iommu_detach_device(drm_dev, dev);
 }
 
 static int fimd_calc_clkdiv(struct fimd_context *ctx,
@@ -805,11 +844,38 @@ static int fimd_clock(struct fimd_context *ctx, bool enable)
 	return 0;
 }
 
+static void fimd_window_suspend(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	struct fimd_win_data *win_data;
+	int i;
+
+	for (i = 0; i < WINDOWS_NR; i++) {
+		win_data = &ctx->win_data[i];
+		win_data->resume = win_data->enabled;
+		fimd_win_disable(dev, i);
+	}
+	fimd_wait_for_vblank(dev);
+}
+
+static void fimd_window_resume(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	struct fimd_win_data *win_data;
+	int i;
+
+	for (i = 0; i < WINDOWS_NR; i++) {
+		win_data = &ctx->win_data[i];
+		win_data->enabled = win_data->resume;
+		win_data->resume = false;
+	}
+}
+
 static int fimd_activate(struct fimd_context *ctx, bool enable)
 {
+	struct device *dev = ctx->subdrv.dev;
 	if (enable) {
 		int ret;
-		struct device *dev = ctx->subdrv.dev;
 
 		ret = fimd_clock(ctx, true);
 		if (ret < 0)
@@ -820,7 +886,11 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
 		/* if vblank was enabled status, enable it again. */
 		if (test_and_clear_bit(0, &ctx->irq_flags))
 			fimd_enable_vblank(dev);
+
+		fimd_window_resume(dev);
 	} else {
+		fimd_window_suspend(dev);
+
 		fimd_clock(ctx, false);
 		ctx->suspended = true;
 	}
@@ -857,18 +927,16 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 	if (!ctx)
 		return -ENOMEM;
 
-	ctx->bus_clk = clk_get(dev, "fimd");
+	ctx->bus_clk = devm_clk_get(dev, "fimd");
 	if (IS_ERR(ctx->bus_clk)) {
 		dev_err(dev, "failed to get bus clock\n");
-		ret = PTR_ERR(ctx->bus_clk);
-		goto err_clk_get;
+		return PTR_ERR(ctx->bus_clk);
 	}
 
-	ctx->lcd_clk = clk_get(dev, "sclk_fimd");
+	ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
 	if (IS_ERR(ctx->lcd_clk)) {
 		dev_err(dev, "failed to get lcd clock\n");
-		ret = PTR_ERR(ctx->lcd_clk);
-		goto err_bus_clk;
+		return PTR_ERR(ctx->lcd_clk);
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -876,14 +944,13 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 	ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
 	if (!ctx->regs) {
 		dev_err(dev, "failed to map registers\n");
-		ret = -ENXIO;
-		goto err_clk;
+		return -ENXIO;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (!res) {
 		dev_err(dev, "irq request failed.\n");
-		goto err_clk;
+		return -ENXIO;
 	}
 
 	ctx->irq = res->start;
@@ -892,13 +959,15 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 							0, "drm_fimd", ctx);
 	if (ret) {
 		dev_err(dev, "irq request failed.\n");
-		goto err_clk;
+		return ret;
 	}
 
 	ctx->vidcon0 = pdata->vidcon0;
 	ctx->vidcon1 = pdata->vidcon1;
 	ctx->default_win = pdata->default_win;
 	ctx->panel = panel;
+	DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+	atomic_set(&ctx->wait_vsync_event, 0);
 
 	subdrv = &ctx->subdrv;
 
@@ -926,17 +995,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 	exynos_drm_subdrv_register(subdrv);
 
 	return 0;
-
-err_clk:
-	clk_disable(ctx->lcd_clk);
-	clk_put(ctx->lcd_clk);
-
-err_bus_clk:
-	clk_disable(ctx->bus_clk);
-	clk_put(ctx->bus_clk);
-
-err_clk_get:
-	return ret;
 }
 
 static int __devexit fimd_remove(struct platform_device *pdev)
@@ -960,9 +1018,6 @@ static int __devexit fimd_remove(struct platform_device *pdev)
 out:
 	pm_runtime_disable(dev);
 
-	clk_put(ctx->lcd_clk);
-	clk_put(ctx->bus_clk);
-
 	return 0;
 }
 
@@ -1056,5 +1111,6 @@ struct platform_driver fimd_driver = {
 		.name	= "exynos4-fb",
 		.owner	= THIS_MODULE,
 		.pm	= &fimd_pm_ops,
+		.of_match_table = of_match_ptr(fimd_driver_dt_match),
 	},
 };
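
The reworked vblank wait above replaces polling VIDCON1 with a flag-and-waitqueue handshake: the waiter arms wait_vsync_event and sleeps for up to 50ms, while the VSYNC interrupt clears the flag and wakes the queue. A condensed sketch of that handshake, assuming generic wait_event_timeout()/wake_up() in place of the DRM_HZ/DRM_WAKEUP macros used by the patch:

	/* Sketch of the handshake between fimd_wait_for_vblank() and
	 * fimd_irq_handler() above; names are illustrative. */
	struct vsync_waiter {
		wait_queue_head_t	queue;
		atomic_t		pending;
	};

	static void vsync_wait(struct vsync_waiter *w)
	{
		atomic_set(&w->pending, 1);	/* arm before sleeping */

		/* sleep until the IRQ clears 'pending', or give up after 50ms */
		if (!wait_event_timeout(w->queue,
					!atomic_read(&w->pending), HZ / 20))
			pr_debug("vblank wait timed out\n");
	}

	static void vsync_irq(struct vsync_waiter *w)
	{
		if (atomic_read(&w->pending)) {
			atomic_set(&w->pending, 0);	/* consume the event */
			wake_up(&w->queue);		/* release any waiter */
		}
	}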
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f7aab24ea46c..6ffa0763c078 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -17,11 +17,14 @@
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-attrs.h>
 
 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
 
 #define G2D_HW_MAJOR_VER		4
 #define G2D_HW_MINOR_VER		1
@@ -92,11 +95,21 @@
 #define G2D_CMDLIST_POOL_SIZE		(G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
 #define G2D_CMDLIST_DATA_NUM		(G2D_CMDLIST_SIZE / sizeof(u32) - 2)
 
+#define MAX_BUF_ADDR_NR			6
+
+/* maximum buffer pool size of userptr is 64MB as default */
+#define MAX_POOL		(64 * 1024 * 1024)
+
+enum {
+	BUF_TYPE_GEM = 1,
+	BUF_TYPE_USERPTR,
+};
+
 /* cmdlist data structure */
 struct g2d_cmdlist {
-	u32	head;
-	u32	data[G2D_CMDLIST_DATA_NUM];
-	u32	last;	/* last data offset */
+	u32		head;
+	unsigned long	data[G2D_CMDLIST_DATA_NUM];
+	u32		last;	/* last data offset */
 };
 
 struct drm_exynos_pending_g2d_event {
@@ -104,15 +117,26 @@ struct drm_exynos_pending_g2d_event {
 	struct drm_exynos_g2d_event	event;
 };
 
-struct g2d_gem_node {
+struct g2d_cmdlist_userptr {
 	struct list_head	list;
-	unsigned int		handle;
+	dma_addr_t		dma_addr;
+	unsigned long		userptr;
+	unsigned long		size;
+	struct page		**pages;
+	unsigned int		npages;
+	struct sg_table		*sgt;
+	struct vm_area_struct	*vma;
+	atomic_t		refcount;
+	bool			in_pool;
+	bool			out_of_list;
 };
 
 struct g2d_cmdlist_node {
 	struct list_head	list;
 	struct g2d_cmdlist	*cmdlist;
-	unsigned int		gem_nr;
+	unsigned int		map_nr;
+	unsigned long		handles[MAX_BUF_ADDR_NR];
+	unsigned int		obj_type[MAX_BUF_ADDR_NR];
 	dma_addr_t		dma_addr;
 
 	struct drm_exynos_pending_g2d_event	*event;
@@ -122,6 +146,7 @@ struct g2d_runqueue_node {
 	struct list_head	list;
 	struct list_head	run_cmdlist;
 	struct list_head	event_list;
+	struct drm_file		*filp;
 	pid_t			pid;
 	struct completion	complete;
 	int			async;
@@ -143,23 +168,33 @@ struct g2d_data {
 	struct mutex			cmdlist_mutex;
 	dma_addr_t			cmdlist_pool;
 	void				*cmdlist_pool_virt;
+	struct dma_attrs		cmdlist_dma_attrs;
 
 	/* runqueue*/
 	struct g2d_runqueue_node	*runqueue_node;
 	struct list_head		runqueue;
 	struct mutex			runqueue_mutex;
 	struct kmem_cache		*runqueue_slab;
+
+	unsigned long			current_pool;
+	unsigned long			max_pool;
 };
 
 static int g2d_init_cmdlist(struct g2d_data *g2d)
 {
 	struct device *dev = g2d->dev;
 	struct g2d_cmdlist_node *node = g2d->cmdlist_node;
+	struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
 	int nr;
 	int ret;
 
-	g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE,
-						&g2d->cmdlist_pool, GFP_KERNEL);
+	init_dma_attrs(&g2d->cmdlist_dma_attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
+
+	g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
+						G2D_CMDLIST_POOL_SIZE,
+						&g2d->cmdlist_pool, GFP_KERNEL,
+						&g2d->cmdlist_dma_attrs);
 	if (!g2d->cmdlist_pool_virt) {
 		dev_err(dev, "failed to allocate dma memory\n");
 		return -ENOMEM;
@@ -184,18 +219,20 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
 	return 0;
 
 err:
-	dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
-			g2d->cmdlist_pool);
+	dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+			g2d->cmdlist_pool_virt,
+			g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
 	return ret;
 }
 
 static void g2d_fini_cmdlist(struct g2d_data *g2d)
 {
-	struct device *dev = g2d->dev;
+	struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
 
 	kfree(g2d->cmdlist_node);
-	dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
-			g2d->cmdlist_pool);
+	dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+			g2d->cmdlist_pool_virt,
+			g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
 }
 
 static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
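
The cmdlist pool above moves from dma_alloc_coherent() to dma_alloc_attrs() with DMA_ATTR_WRITE_COMBINE, trading a fully uncached CPU mapping for a write-combined one that is cheaper for the CPU to fill. A minimal sketch of the allocate/free pairing under the struct dma_attrs API this patch targets (the pool size and helper names are placeholders):

	/* Sketch: write-combined DMA pool, mirroring g2d_init_cmdlist()/
	 * g2d_fini_cmdlist() above. POOL_SIZE is illustrative. */
	#define POOL_SIZE	(4096 * 64)

	static void *pool_alloc(struct device *dev, dma_addr_t *dma,
				struct dma_attrs *attrs)
	{
		init_dma_attrs(attrs);
		/* CPU writes may be buffered and combined; no cache to manage */
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, attrs);

		return dma_alloc_attrs(dev, POOL_SIZE, dma, GFP_KERNEL, attrs);
	}

	static void pool_free(struct device *dev, void *vaddr, dma_addr_t dma,
			      struct dma_attrs *attrs)
	{
		/* the same attrs must be passed back at free time */
		dma_free_attrs(dev, POOL_SIZE, vaddr, dma, attrs);
	}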
@@ -245,62 +282,300 @@ add_to_list:
 		list_add_tail(&node->event->base.link, &g2d_priv->event_list);
 }
 
-static int g2d_get_cmdlist_gem(struct drm_device *drm_dev,
-			       struct drm_file *file,
-			       struct g2d_cmdlist_node *node)
+static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
+					unsigned long obj,
+					bool force)
 {
-	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct g2d_cmdlist_userptr *g2d_userptr =
+					(struct g2d_cmdlist_userptr *)obj;
+
+	if (!obj)
+		return;
+
+	if (force)
+		goto out;
+
+	atomic_dec(&g2d_userptr->refcount);
+
+	if (atomic_read(&g2d_userptr->refcount) > 0)
+		return;
+
+	if (g2d_userptr->in_pool)
+		return;
+
+out:
+	exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
+					DMA_BIDIRECTIONAL);
+
+	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+					g2d_userptr->npages,
+					g2d_userptr->vma);
+
+	if (!g2d_userptr->out_of_list)
+		list_del_init(&g2d_userptr->list);
+
+	sg_free_table(g2d_userptr->sgt);
+	kfree(g2d_userptr->sgt);
+	g2d_userptr->sgt = NULL;
+
+	kfree(g2d_userptr->pages);
+	g2d_userptr->pages = NULL;
+	kfree(g2d_userptr);
+	g2d_userptr = NULL;
+}
+
+dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+					unsigned long userptr,
+					unsigned long size,
+					struct drm_file *filp,
+					unsigned long *obj)
+{
+	struct drm_exynos_file_private *file_priv = filp->driver_priv;
 	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+	struct g2d_cmdlist_userptr *g2d_userptr;
+	struct g2d_data *g2d;
+	struct page **pages;
+	struct sg_table	*sgt;
+	struct vm_area_struct *vma;
+	unsigned long start, end;
+	unsigned int npages, offset;
+	int ret;
+
+	if (!size) {
+		DRM_ERROR("invalid userptr size.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	g2d = dev_get_drvdata(g2d_priv->dev);
+
+	/* check if userptr already exists in userptr_list. */
+	list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
+		if (g2d_userptr->userptr == userptr) {
+			/*
+			 * also check the size, because the same address
+			 * could be registered with a different size.
+			 */
+			if (g2d_userptr->size == size) {
+				atomic_inc(&g2d_userptr->refcount);
+				*obj = (unsigned long)g2d_userptr;
+
+				return &g2d_userptr->dma_addr;
+			}
+
+			/*
+			 * g2d dma may still be accessing this g2d_userptr
+			 * memory region, so remove the g2d_userptr object
+			 * from userptr_list so that it cannot be looked up
+			 * again, and exclude it from the userptr pool so
+			 * that it is released only after the dma access
+			 * has completed.
+			 */
+			g2d_userptr->out_of_list = true;
+			g2d_userptr->in_pool = false;
+			list_del_init(&g2d_userptr->list);
+
+			break;
+		}
+	}
+
+	g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
+	if (!g2d_userptr) {
+		DRM_ERROR("failed to allocate g2d_userptr.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	atomic_set(&g2d_userptr->refcount, 1);
+
+	start = userptr & PAGE_MASK;
+	offset = userptr & ~PAGE_MASK;
+	end = PAGE_ALIGN(userptr + size);
+	npages = (end - start) >> PAGE_SHIFT;
+	g2d_userptr->npages = npages;
+
+	pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
+	if (!pages) {
+		DRM_ERROR("failed to allocate pages.\n");
+		kfree(g2d_userptr);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	vma = find_vma(current->mm, userptr);
+	if (!vma) {
+		DRM_ERROR("failed to get vm region.\n");
+		ret = -EFAULT;
+		goto err_free_pages;
+	}
+
+	if (vma->vm_end < userptr + size) {
+		DRM_ERROR("vma is too small.\n");
+		ret = -EFAULT;
+		goto err_free_pages;
+	}
+
+	g2d_userptr->vma = exynos_gem_get_vma(vma);
+	if (!g2d_userptr->vma) {
+		DRM_ERROR("failed to copy vma.\n");
+		ret = -ENOMEM;
+		goto err_free_pages;
+	}
+
+	g2d_userptr->size = size;
+
+	ret = exynos_gem_get_pages_from_userptr(start, npages, pages, vma);
+	if (ret < 0) {
+		DRM_ERROR("failed to get user pages from userptr.\n");
+		goto err_put_vma;
+	}
+
+	g2d_userptr->pages = pages;
+
+	sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
+	if (!sgt) {
+		DRM_ERROR("failed to allocate sg table.\n");
+		ret = -ENOMEM;
+		goto err_free_userptr;
+	}
+
+	ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
+					size, GFP_KERNEL);
+	if (ret < 0) {
+		DRM_ERROR("failed to get sgt from pages.\n");
+		goto err_free_sgt;
+	}
+
+	g2d_userptr->sgt = sgt;
+
+	ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
+						DMA_BIDIRECTIONAL);
+	if (ret < 0) {
+		DRM_ERROR("failed to map sgt with dma region.\n");
+		goto err_free_sgt;
+	}
+
+	g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
+	g2d_userptr->userptr = userptr;
+
+	list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
+
+	if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
+		g2d->current_pool += npages << PAGE_SHIFT;
+		g2d_userptr->in_pool = true;
+	}
+
+	*obj = (unsigned long)g2d_userptr;
+
+	return &g2d_userptr->dma_addr;
+
+err_free_sgt:
+	sg_free_table(sgt);
+	kfree(sgt);
+	sgt = NULL;
+
+err_free_userptr:
+	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+					g2d_userptr->npages,
+					g2d_userptr->vma);
+
+err_put_vma:
+	exynos_gem_put_vma(g2d_userptr->vma);
+
+err_free_pages:
+	kfree(pages);
+	kfree(g2d_userptr);
+	pages = NULL;
+	g2d_userptr = NULL;
+
+	return ERR_PTR(ret);
+}
+
+static void g2d_userptr_free_all(struct drm_device *drm_dev,
+					struct g2d_data *g2d,
+					struct drm_file *filp)
+{
+	struct drm_exynos_file_private *file_priv = filp->driver_priv;
+	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+	struct g2d_cmdlist_userptr *g2d_userptr, *n;
+
+	list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
+		if (g2d_userptr->in_pool)
+			g2d_userptr_put_dma_addr(drm_dev,
+						(unsigned long)g2d_userptr,
+						true);
+
+	g2d->current_pool = 0;
+}
+
+static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
+				struct g2d_cmdlist_node *node,
+				struct drm_device *drm_dev,
+				struct drm_file *file)
+{
 	struct g2d_cmdlist *cmdlist = node->cmdlist;
-	dma_addr_t *addr;
 	int offset;
 	int i;
 
-	for (i = 0; i < node->gem_nr; i++) {
-		struct g2d_gem_node *gem_node;
-
-		gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
-		if (!gem_node) {
-			dev_err(g2d_priv->dev, "failed to allocate gem node\n");
-			return -ENOMEM;
-		}
+	for (i = 0; i < node->map_nr; i++) {
+		unsigned long handle;
+		dma_addr_t *addr;
 
 		offset = cmdlist->last - (i * 2 + 1);
-		gem_node->handle = cmdlist->data[offset];
+		handle = cmdlist->data[offset];
 
-		addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle,
-						   file);
-		if (IS_ERR(addr)) {
-			node->gem_nr = i;
-			kfree(gem_node);
-			return PTR_ERR(addr);
+		if (node->obj_type[i] == BUF_TYPE_GEM) {
+			addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
+								file);
+			if (IS_ERR(addr)) {
+				node->map_nr = i;
+				return -EFAULT;
+			}
+		} else {
+			struct drm_exynos_g2d_userptr g2d_userptr;
+
+			if (copy_from_user(&g2d_userptr, (void __user *)handle,
+				sizeof(struct drm_exynos_g2d_userptr))) {
+				node->map_nr = i;
+				return -EFAULT;
+			}
+
+			addr = g2d_userptr_get_dma_addr(drm_dev,
+							g2d_userptr.userptr,
+							g2d_userptr.size,
+							file,
+							&handle);
+			if (IS_ERR(addr)) {
+				node->map_nr = i;
+				return -EFAULT;
+			}
 		}
 
 		cmdlist->data[offset] = *addr;
-		list_add_tail(&gem_node->list, &g2d_priv->gem_list);
-		g2d_priv->gem_nr++;
+		node->handles[i] = handle;
 	}
 
 	return 0;
 }
 
-static void g2d_put_cmdlist_gem(struct drm_device *drm_dev,
-				struct drm_file *file,
-				unsigned int nr)
+static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
+				  struct g2d_cmdlist_node *node,
+				  struct drm_file *filp)
 {
-	struct drm_exynos_file_private *file_priv = file->driver_priv;
-	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
-	struct g2d_gem_node *node, *n;
+	struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+	int i;
 
-	list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) {
-		if (!nr)
-			break;
+	for (i = 0; i < node->map_nr; i++) {
+		unsigned long handle = node->handles[i];
 
-		exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file);
-		list_del_init(&node->list);
-		kfree(node);
-		nr--;
+		if (node->obj_type[i] == BUF_TYPE_GEM)
+			exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
+							filp);
+		else
+			g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
+							false);
+
+		node->handles[i] = 0;
 	}
+
+	node->map_nr = 0;
 }
 
 static void g2d_dma_start(struct g2d_data *g2d,
@@ -337,10 +612,18 @@ static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
 static void g2d_free_runqueue_node(struct g2d_data *g2d,
 				   struct g2d_runqueue_node *runqueue_node)
 {
+	struct g2d_cmdlist_node *node;
+
 	if (!runqueue_node)
 		return;
 
 	mutex_lock(&g2d->cmdlist_mutex);
+	/*
+	 * the commands in run_cmdlist have completed, so unmap the gem
+	 * objects in each command node to drop their references.
+	 */
+	list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
+		g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
 	list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
 	mutex_unlock(&g2d->cmdlist_mutex);
 
@@ -430,15 +713,28 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
+static int g2d_check_reg_offset(struct device *dev,
+				struct g2d_cmdlist_node *node,
 				int nr, bool for_addr)
 {
+	struct g2d_cmdlist *cmdlist = node->cmdlist;
 	int reg_offset;
 	int index;
 	int i;
 
 	for (i = 0; i < nr; i++) {
 		index = cmdlist->last - 2 * (i + 1);
+
+		if (for_addr) {
+			/* check userptr buffer type. */
+			reg_offset = (cmdlist->data[index] &
+					~0x7fffffff) >> 31;
+			if (reg_offset) {
+				node->obj_type[i] = BUF_TYPE_USERPTR;
+				cmdlist->data[index] &= ~G2D_BUF_USERPTR;
+			}
+		}
+
 		reg_offset = cmdlist->data[index] & ~0xfffff000;
 
 		if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
@@ -455,6 +751,9 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
 		case G2D_MSK_BASE_ADDR:
 			if (!for_addr)
 				goto err;
+
+			if (node->obj_type[i] != BUF_TYPE_USERPTR)
+				node->obj_type[i] = BUF_TYPE_GEM;
 			break;
 		default:
 			if (for_addr)
@@ -466,7 +765,7 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
 	return 0;
 
 err:
-	dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
+	dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
 	return -EINVAL;
 }
 
@@ -566,7 +865,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
 	}
 
 	/* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
-	size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
+	size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
 	if (size > G2D_CMDLIST_DATA_NUM) {
 		dev_err(dev, "cmdlist size is too big\n");
 		ret = -EINVAL;
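
The slot accounting in the size check works out as follows: each register command occupies two data slots (an offset/value pair), each buffer entry likewise two, and the trailing G2D_BITBLT_START pair adds the final two. For example, a request with cmd_nr = 4 and cmd_buf_nr = 2 needs cmdlist->last + 4*2 + 2*2 + 2 = cmdlist->last + 14 slots, which must stay within G2D_CMDLIST_DATA_NUM.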
@@ -583,29 +882,29 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
 	}
 	cmdlist->last += req->cmd_nr * 2;
 
-	ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
+	ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
 	if (ret < 0)
 		goto err_free_event;
 
-	node->gem_nr = req->cmd_gem_nr;
-	if (req->cmd_gem_nr) {
-		struct drm_exynos_g2d_cmd *cmd_gem;
+	node->map_nr = req->cmd_buf_nr;
+	if (req->cmd_buf_nr) {
+		struct drm_exynos_g2d_cmd *cmd_buf;
 
-		cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem;
+		cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
 
 		if (copy_from_user(cmdlist->data + cmdlist->last,
-					(void __user *)cmd_gem,
-					sizeof(*cmd_gem) * req->cmd_gem_nr)) {
+					(void __user *)cmd_buf,
+					sizeof(*cmd_buf) * req->cmd_buf_nr)) {
 			ret = -EFAULT;
 			goto err_free_event;
 		}
-		cmdlist->last += req->cmd_gem_nr * 2;
+		cmdlist->last += req->cmd_buf_nr * 2;
 
-		ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
+		ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
 		if (ret < 0)
 			goto err_free_event;
 
-		ret = g2d_get_cmdlist_gem(drm_dev, file, node);
+		ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
 		if (ret < 0)
 			goto err_unmap;
 	}
@@ -624,7 +923,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
 	return 0;
 
 err_unmap:
-	g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr);
+	g2d_unmap_cmdlist_gem(g2d, node, file);
 err_free_event:
 	if (node->event) {
 		spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -680,6 +979,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
 
 	mutex_lock(&g2d->runqueue_mutex);
 	runqueue_node->pid = current->pid;
+	runqueue_node->filp = file;
 	list_add_tail(&runqueue_node->list, &g2d->runqueue);
 	if (!g2d->runqueue_node)
 		g2d_exec_runqueue(g2d);
@@ -696,6 +996,43 @@ out:
 }
 EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
 
+static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+	struct g2d_data *g2d;
+	int ret;
+
+	g2d = dev_get_drvdata(dev);
+	if (!g2d)
+		return -EFAULT;
+
+	/* allocate dma-aware cmdlist buffer. */
+	ret = g2d_init_cmdlist(g2d);
+	if (ret < 0) {
+		dev_err(dev, "cmdlist init failed\n");
+		return ret;
+	}
+
+	if (!is_drm_iommu_supported(drm_dev))
+		return 0;
+
+	ret = drm_iommu_attach_device(drm_dev, dev);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable iommu.\n");
+		g2d_fini_cmdlist(g2d);
+	}
+
+	return ret;
+}
+
+static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+	if (!is_drm_iommu_supported(drm_dev))
+		return;
+
+	drm_iommu_detach_device(drm_dev, dev);
+}
+
 static int g2d_open(struct drm_device *drm_dev, struct device *dev,
 			struct drm_file *file)
 {
@@ -713,7 +1050,7 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
 
 	INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
 	INIT_LIST_HEAD(&g2d_priv->event_list);
-	INIT_LIST_HEAD(&g2d_priv->gem_list);
+	INIT_LIST_HEAD(&g2d_priv->userptr_list);
 
 	return 0;
 }
@@ -734,11 +1071,21 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
 		return;
 
 	mutex_lock(&g2d->cmdlist_mutex);
-	list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
+	list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
+		/*
+		 * unmap all gem objects whose commands have not completed.
+		 *
+		 * if the current process was terminated forcibly, some
+		 * commands may still sit in inuse_cmdlist, so unmap them
+		 * here as well.
+		 */
+		g2d_unmap_cmdlist_gem(g2d, node, file);
 		list_move_tail(&node->list, &g2d->free_cmdlist);
+	}
 	mutex_unlock(&g2d->cmdlist_mutex);
 
-	g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr);
+	/* release all g2d_userptr in pool. */
+	g2d_userptr_free_all(drm_dev, g2d, file);
 
 	kfree(file_priv->g2d_priv);
 }
@@ -778,15 +1125,11 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 	mutex_init(&g2d->cmdlist_mutex);
 	mutex_init(&g2d->runqueue_mutex);
 
-	ret = g2d_init_cmdlist(g2d);
-	if (ret < 0)
-		goto err_destroy_workqueue;
-
-	g2d->gate_clk = clk_get(dev, "fimg2d");
+	g2d->gate_clk = devm_clk_get(dev, "fimg2d");
 	if (IS_ERR(g2d->gate_clk)) {
 		dev_err(dev, "failed to get gate clock\n");
 		ret = PTR_ERR(g2d->gate_clk);
-		goto err_fini_cmdlist;
+		goto err_destroy_workqueue;
 	}
 
 	pm_runtime_enable(dev);
@@ -814,10 +1157,14 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 		goto err_put_clk;
 	}
 
+	g2d->max_pool = MAX_POOL;
+
 	platform_set_drvdata(pdev, g2d);
 
 	subdrv = &g2d->subdrv;
 	subdrv->dev = dev;
+	subdrv->probe = g2d_subdrv_probe;
+	subdrv->remove = g2d_subdrv_remove;
 	subdrv->open = g2d_open;
 	subdrv->close = g2d_close;
 
@@ -834,9 +1181,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 
 err_put_clk:
 	pm_runtime_disable(dev);
-	clk_put(g2d->gate_clk);
-err_fini_cmdlist:
-	g2d_fini_cmdlist(g2d);
 err_destroy_workqueue:
 	destroy_workqueue(g2d->g2d_workq);
 err_destroy_slab:
@@ -857,7 +1201,6 @@ static int __devexit g2d_remove(struct platform_device *pdev)
 	}
 
 	pm_runtime_disable(&pdev->dev);
-	clk_put(g2d->gate_clk);
 
 	g2d_fini_cmdlist(g2d);
 	destroy_workqueue(g2d->g2d_workq);
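
The userptr machinery added above keeps each pinned region alive by reference count: a cmdlist that reuses an already-registered userptr only bumps the count, and the put path tears the mapping down only when the count reaches zero and the entry is not cached in the pool, or unconditionally when `force` is set at file-close time. A simplified sketch of that lifetime rule (the structure and helpers are placeholders, and atomic_dec_and_test() stands in for the patch's dec-then-read sequence):

	/* Sketch of the g2d_userptr_get_dma_addr()/g2d_userptr_put_dma_addr()
	 * lifetime rule above; names simplified. */
	struct userptr_ref {
		atomic_t	refcount;
		bool		in_pool;	/* cached for reuse by later cmdlists */
	};

	static void userptr_get(struct userptr_ref *ref)
	{
		atomic_inc(&ref->refcount);	/* reuse: no re-pinning needed */
	}

	static void userptr_put(struct userptr_ref *ref, bool force)
	{
		if (!force) {
			if (!atomic_dec_and_test(&ref->refcount))
				return;		/* still used by a queued cmdlist */
			if (ref->in_pool)
				return;		/* kept pinned until pool cleanup */
		}
		/* final reference (or forced cleanup): unmap, unpin and free */
	}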
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d2545560664f..d48183e7e056 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -83,157 +83,40 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
 
 static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
 {
-	if (!IS_NONCONTIG_BUFFER(flags)) {
-		if (size >= SZ_1M)
-			return roundup(size, SECTION_SIZE);
-		else if (size >= SZ_64K)
-			return roundup(size, SZ_64K);
-		else
-			goto out;
-	}
-out:
+	/* TODO */
+
 	return roundup(size, PAGE_SIZE);
 }
 
-struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
-						gfp_t gfpmask)
-{
-	struct page *p, **pages;
-	int i, npages;
-
-	npages = obj->size >> PAGE_SHIFT;
-
-	pages = drm_malloc_ab(npages, sizeof(struct page *));
-	if (pages == NULL)
-		return ERR_PTR(-ENOMEM);
-
-	for (i = 0; i < npages; i++) {
-		p = alloc_page(gfpmask);
-		if (IS_ERR(p))
-			goto fail;
-		pages[i] = p;
-	}
-
-	return pages;
-
-fail:
-	while (--i)
-		__free_page(pages[i]);
-
-	drm_free_large(pages);
-	return ERR_CAST(p);
-}
-
-static void exynos_gem_put_pages(struct drm_gem_object *obj,
-					struct page **pages)
-{
-	int npages;
-
-	npages = obj->size >> PAGE_SHIFT;
-
-	while (--npages >= 0)
-		__free_page(pages[npages]);
-
-	drm_free_large(pages);
-}
-
-static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
+static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
 					struct vm_area_struct *vma,
 					unsigned long f_vaddr,
 					pgoff_t page_offset)
-{
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-	unsigned long pfn;
-
-	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-		if (!buf->pages)
-			return -EINTR;
-
-		pfn = page_to_pfn(buf->pages[page_offset++]);
-	} else
-		pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
-
-	return vm_insert_mixed(vma, f_vaddr, pfn);
-}
-
-static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
 	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
 	struct scatterlist *sgl;
-	struct page **pages;
-	unsigned int npages, i = 0;
-	int ret;
+	unsigned long pfn;
+	int i;
 
-	if (buf->pages) {
-		DRM_DEBUG_KMS("already allocated.\n");
+	if (!buf->sgt)
+		return -EINTR;
+
+	if (page_offset >= (buf->size >> PAGE_SHIFT)) {
+		DRM_ERROR("invalid page offset\n");
 		return -EINVAL;
 	}
 
-	pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
-	if (IS_ERR(pages)) {
-		DRM_ERROR("failed to get pages.\n");
-		return PTR_ERR(pages);
-	}
-
-	npages = obj->size >> PAGE_SHIFT;
-	buf->page_size = PAGE_SIZE;
-
-	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!buf->sgt) {
-		DRM_ERROR("failed to allocate sg table.\n");
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
-	if (ret < 0) {
-		DRM_ERROR("failed to initialize sg table.\n");
-		ret = -EFAULT;
-		goto err1;
-	}
-
 	sgl = buf->sgt->sgl;
-
-	/* set all pages to sg list. */
-	while (i < npages) {
-		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
-		sg_dma_address(sgl) = page_to_phys(pages[i]);
-		i++;
-		sgl = sg_next(sgl);
+	for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
+		if (page_offset < (sgl->length >> PAGE_SHIFT))
+			break;
+		page_offset -= (sgl->length >> PAGE_SHIFT);
 	}
 
-	/* add some codes for UNCACHED type here. TODO */
+	pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
 
-	buf->pages = pages;
-	return ret;
-err1:
-	kfree(buf->sgt);
-	buf->sgt = NULL;
-err:
-	exynos_gem_put_pages(obj, pages);
-	return ret;
-
-}
-
-static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
-{
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-
-	/*
-	 * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
-	 * allocated at gem fault handler.
-	 */
-	sg_free_table(buf->sgt);
-	kfree(buf->sgt);
-	buf->sgt = NULL;
-
-	exynos_gem_put_pages(obj, buf->pages);
-	buf->pages = NULL;
-
-	/* add some codes for UNCACHED type here. TODO */
+	return vm_insert_mixed(vma, f_vaddr, pfn);
 }
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -270,9 +153,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 
 	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
 
-	if (!buf->pages)
-		return;
-
 	/*
 	 * do not release memory region from exporter.
 	 *
@@ -282,10 +162,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 	if (obj->import_attach)
 		goto out;
 
-	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
-		exynos_drm_gem_put_pages(obj);
-	else
-		exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
 
 out:
 	exynos_drm_fini_buf(obj->dev, buf);
@@ -364,22 +241,10 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
 	/* set memory type and cache attribute from user side. */
 	exynos_gem_obj->flags = flags;
 
-	/*
-	 * allocate all pages as desired size if user wants to allocate
-	 * physically non-continuous memory.
-	 */
-	if (flags & EXYNOS_BO_NONCONTIG) {
-		ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
-		if (ret < 0) {
-			drm_gem_object_release(&exynos_gem_obj->base);
-			goto err_fini_buf;
-		}
-	} else {
-		ret = exynos_drm_alloc_buf(dev, buf, flags);
-		if (ret < 0) {
-			drm_gem_object_release(&exynos_gem_obj->base);
-			goto err_fini_buf;
-		}
+	ret = exynos_drm_alloc_buf(dev, buf, flags);
+	if (ret < 0) {
+		drm_gem_object_release(&exynos_gem_obj->base);
+		goto err_fini_buf;
 	}
 
 	return exynos_gem_obj;
@@ -412,14 +277,14 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
 					unsigned int gem_handle,
-					struct drm_file *file_priv)
+					struct drm_file *filp)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct drm_gem_object *obj;
 
-	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+	obj = drm_gem_object_lookup(dev, filp, gem_handle);
 	if (!obj) {
 		DRM_ERROR("failed to lookup gem object.\n");
 		return ERR_PTR(-EINVAL);
@@ -427,25 +292,17 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
 
 	exynos_gem_obj = to_exynos_gem_obj(obj);
 
-	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-		DRM_DEBUG_KMS("not support NONCONTIG type.\n");
-		drm_gem_object_unreference_unlocked(obj);
-
-		/* TODO */
-		return ERR_PTR(-EINVAL);
-	}
-
 	return &exynos_gem_obj->buffer->dma_addr;
 }
 
 void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
 					unsigned int gem_handle,
-					struct drm_file *file_priv)
+					struct drm_file *filp)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct drm_gem_object *obj;
 
-	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+	obj = drm_gem_object_lookup(dev, filp, gem_handle);
 	if (!obj) {
 		DRM_ERROR("failed to lookup gem object.\n");
 		return;
@@ -453,14 +310,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
 
 	exynos_gem_obj = to_exynos_gem_obj(obj);
 
-	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-		DRM_DEBUG_KMS("not support NONCONTIG type.\n");
-		drm_gem_object_unreference_unlocked(obj);
-
-		/* TODO */
-		return;
-	}
-
 	drm_gem_object_unreference_unlocked(obj);
 
 	/*
@@ -489,22 +338,57 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
 			&args->offset);
 }
 
+static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
+							struct file *filp)
+{
+	struct drm_file *file_priv;
+
+	mutex_lock(&drm_dev->struct_mutex);
+
+	/* find current process's drm_file from filelist. */
+	list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
+		if (file_priv->filp == filp) {
+			mutex_unlock(&drm_dev->struct_mutex);
+			return file_priv;
+		}
+	}
+
+	mutex_unlock(&drm_dev->struct_mutex);
+	WARN_ON(1);
+
+	return ERR_PTR(-EFAULT);
+}
+
 static int exynos_drm_gem_mmap_buffer(struct file *filp,
 				      struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = filp->private_data;
 	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+	struct drm_device *drm_dev = obj->dev;
 	struct exynos_drm_gem_buf *buffer;
-	unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
+	struct drm_file *file_priv;
+	unsigned long vm_size;
 	int ret;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_private_data = obj;
+	vma->vm_ops = drm_dev->driver->gem_vm_ops;
+
+	/* restore it to driver's fops. */
+	filp->f_op = fops_get(drm_dev->driver->fops);
+
+	file_priv = exynos_drm_find_drm_file(drm_dev, filp);
+	if (IS_ERR(file_priv))
+		return PTR_ERR(file_priv);
+
+	/* restore it to drm_file. */
+	filp->private_data = file_priv;
 
 	update_vm_cache_attr(exynos_gem_obj, vma);
 
-	vm_size = usize = vma->vm_end - vma->vm_start;
+	vm_size = vma->vm_end - vma->vm_start;
 
 	/*
 	 * a buffer contains information to physically continuous memory
@@ -516,41 +400,24 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
 	if (vm_size > buffer->size)
 		return -EINVAL;
 
-	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-		int i = 0;
-
-		if (!buffer->pages)
-			return -EINVAL;
-
-		vma->vm_flags |= VM_MIXEDMAP;
-
-		do {
-			ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
-			if (ret) {
-				DRM_ERROR("failed to remap user space.\n");
-				return ret;
-			}
-
-			uaddr += PAGE_SIZE;
-			usize -= PAGE_SIZE;
-		} while (usize > 0);
-	} else {
-		/*
-		 * get page frame number to physical memory to be mapped
-		 * to user space.
-		 */
-		pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
-								PAGE_SHIFT;
-
-		DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
-
-		if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
-					vma->vm_page_prot)) {
-			DRM_ERROR("failed to remap pfn range.\n");
-			return -EAGAIN;
-		}
+	ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
+				buffer->dma_addr, buffer->size,
+				&buffer->dma_attrs);
+	if (ret < 0) {
+		DRM_ERROR("failed to mmap.\n");
+		return ret;
 	}
 
+	/*
+	 * take a reference to this mapping of the object. this reference
+	 * is dropped by the corresponding vm_close call.
+	 */
+	drm_gem_object_reference(obj);
+
+	mutex_lock(&drm_dev->struct_mutex);
+	drm_vm_open_locked(drm_dev, vma);
+	mutex_unlock(&drm_dev->struct_mutex);
+
 	return 0;
 }
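
exynos_drm_gem_mmap_buffer() above takes one GEM reference per mapping and installs the driver's gem_vm_ops so that the matching close hook drops it again. A condensed sketch of that symmetric pairing (the vm_ops shown are illustrative; the patch reuses drm_dev->driver->gem_vm_ops and drm_vm_open_locked() rather than defining new ones):

	/* Sketch of the per-mapping reference pairing set up in
	 * exynos_drm_gem_mmap_buffer() above; names are illustrative. */
	static void sketch_gem_vm_open(struct vm_area_struct *vma)
	{
		/* a duplicated mapping (e.g. across fork) takes its own ref */
		drm_gem_object_reference(vma->vm_private_data);
	}

	static void sketch_gem_vm_close(struct vm_area_struct *vma)
	{
		/* dropped when the mapping is torn down, balancing the mmap */
		drm_gem_object_unreference_unlocked(vma->vm_private_data);
	}

	static const struct vm_operations_struct sketch_gem_vm_ops = {
		.open	= sketch_gem_vm_open,
		.close	= sketch_gem_vm_close,
	};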
 
@@ -578,16 +445,29 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	obj->filp->f_op = &exynos_drm_gem_fops;
-	obj->filp->private_data = obj;
+	/*
+	 * Set the specific mapper's fops; they will be restored to
+	 * dev->driver->fops by exynos_drm_gem_mmap_buffer.
+	 * This is used to call the specific mapper temporarily.
+	 */
+	file_priv->filp->f_op = &exynos_drm_gem_fops;
 
-	addr = vm_mmap(obj->filp, 0, args->size,
+	/*
+	 * Set the gem object to private_data so that the specific
+	 * mapper can retrieve it; it will be restored to the drm_file
+	 * by exynos_drm_gem_mmap_buffer.
+	 */
+	file_priv->filp->private_data = obj;
+
+	addr = vm_mmap(file_priv->filp, 0, args->size,
 			PROT_READ | PROT_WRITE, MAP_SHARED, 0);
 
 	drm_gem_object_unreference_unlocked(obj);
 
-	if (IS_ERR((void *)addr))
+	if (IS_ERR((void *)addr)) {
+		file_priv->filp->private_data = file_priv;
 		return PTR_ERR((void *)addr);
+	}
 
 	args->mapped = addr;
 
@@ -622,6 +502,129 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
+{
+	struct vm_area_struct *vma_copy;
+
+	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
+	if (!vma_copy)
+		return NULL;
+
+	if (vma->vm_ops && vma->vm_ops->open)
+		vma->vm_ops->open(vma);
+
+	if (vma->vm_file)
+		get_file(vma->vm_file);
+
+	memcpy(vma_copy, vma, sizeof(*vma));
+
+	vma_copy->vm_mm = NULL;
+	vma_copy->vm_next = NULL;
+	vma_copy->vm_prev = NULL;
+
+	return vma_copy;
+}
+
+void exynos_gem_put_vma(struct vm_area_struct *vma)
+{
+	if (!vma)
+		return;
+
+	if (vma->vm_ops && vma->vm_ops->close)
+		vma->vm_ops->close(vma);
+
+	if (vma->vm_file)
+		fput(vma->vm_file);
+
+	kfree(vma);
+}
+
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+						unsigned int npages,
+						struct page **pages,
+						struct vm_area_struct *vma)
+{
+	int get_npages;
+
+	/* the memory region mmaped with VM_PFNMAP. */
+	if (vma_is_io(vma)) {
+		unsigned int i;
+
+		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
+			unsigned long pfn;
+			int ret = follow_pfn(vma, start, &pfn);
+			if (ret)
+				return ret;
+
+			pages[i] = pfn_to_page(pfn);
+		}
+
+		return 0;
+	}
+
+	get_npages = get_user_pages(current, current->mm, start,
+					npages, 1, 1, pages, NULL);
+	get_npages = max(get_npages, 0);
+	if (get_npages != npages) {
+		DRM_ERROR("failed to get user_pages.\n");
+		while (get_npages)
+			put_page(pages[--get_npages]);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+					unsigned int npages,
+					struct vm_area_struct *vma)
+{
+	if (!vma_is_io(vma)) {
+		unsigned int i;
+
+		for (i = 0; i < npages; i++) {
+			set_page_dirty_lock(pages[i]);
+
+			/*
+			 * undo the reference we took when populating
+			 * the table.
+			 */
+			put_page(pages[i]);
+		}
+	}
+}
+
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+				struct sg_table *sgt,
+				enum dma_data_direction dir)
+{
+	int nents;
+
+	mutex_lock(&drm_dev->struct_mutex);
+
+	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+	if (!nents) {
+		DRM_ERROR("failed to map sgl with dma.\n");
+		mutex_unlock(&drm_dev->struct_mutex);
+		return nents;
+	}
+
+	mutex_unlock(&drm_dev->struct_mutex);
+	return 0;
+}
+
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+				struct sg_table *sgt,
+				enum dma_data_direction dir)
+{
+	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+}
+
 int exynos_drm_gem_init_object(struct drm_gem_object *obj)
 {
 	DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -753,9 +756,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
+	ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
 	if (ret < 0)
-		DRM_ERROR("failed to map pages.\n");
+		DRM_ERROR("failed to map a buffer with user.\n");
 
 	mutex_unlock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 085b2a5d5f70..f11f2afd5bfc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -35,21 +35,27 @@
  * exynos drm gem buffer structure.
  *
  * @kvaddr: kernel virtual address to allocated memory region.
+ * @userptr: user space address.
  * @dma_addr: bus address(accessed by dma) to allocated memory region.
  *	- this address could be physical address without IOMMU and
  *	device address with IOMMU.
+ * @write: whether pages will be written to by the caller.
+ * @pages: Array of backing pages.
  * @sgt: sg table to transfer page data.
- * @pages: contain all pages to allocated memory region.
- * @page_size: could be 4K, 64K or 1MB.
  * @size: size of allocated memory region.
+ * @pfnmap: indicates whether the memory region from userptr is mapped
+ *	with VM_PFNMAP or not.
  */
 struct exynos_drm_gem_buf {
 	void __iomem		*kvaddr;
+	unsigned long		userptr;
 	dma_addr_t		dma_addr;
-	struct sg_table		*sgt;
+	struct dma_attrs	dma_attrs;
+	unsigned int		write;
 	struct page		**pages;
-	unsigned long		page_size;
+	struct sg_table		*sgt;
 	unsigned long		size;
+	bool			pfnmap;
 };
 
 /*
@@ -65,6 +71,7 @@ struct exynos_drm_gem_buf {
  *	or at framebuffer creation.
  * @size: size requested from user, in bytes and this size is aligned
  *	in page unit.
+ * @vma: a pointer to vm_area.
 * @flags: indicates memory type and cache attribute of the allocated buffer.
  *
 * P.S. this object would be transferred to user as kms_bo.handle so
@@ -74,6 +81,7 @@ struct exynos_drm_gem_obj {
 	struct drm_gem_object		base;
 	struct exynos_drm_gem_buf	*buffer;
 	unsigned long			size;
+	struct vm_area_struct		*vma;
 	unsigned int			flags;
 };
 
@@ -104,9 +112,9 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
  * other drivers such as 2d/3d acceleration drivers.
  * with this function call, gem object reference count would be increased.
  */
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
 					unsigned int gem_handle,
-					struct drm_file *file_priv);
+					struct drm_file *filp);
 
 /*
  * put dma address from gem handle and this function could be used for
@@ -115,7 +123,7 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
  */
 void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
 					unsigned int gem_handle,
-					struct drm_file *file_priv);
+					struct drm_file *filp);
 
 /* get buffer offset to map to user space. */
 int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
@@ -128,6 +136,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
 int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 
+/* map user space memory allocated by malloc to pages. */
+int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv);
+
 /* get buffer information to memory region allocated by gem. */
 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
 				      struct drm_file *file_priv);
@@ -163,4 +175,36 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 /* set vm_flags and we can change the vm attribute to other one at here. */
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+/* get a copy of a virtual memory region. */
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
+
+/* release a userspace virtual memory area. */
+void exynos_gem_put_vma(struct vm_area_struct *vma);
+
+/* get pages from user space. */
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+						unsigned int npages,
+						struct page **pages,
+						struct vm_area_struct *vma);
+
+/* drop the reference to pages. */
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+					unsigned int npages,
+					struct vm_area_struct *vma);
+
+/* map sgt with dma region. */
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+				struct sg_table *sgt,
+				enum dma_data_direction dir);
+
+/* unmap sgt from dma region. */
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+				struct sg_table *sgt,
+				enum dma_data_direction dir);
+
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
new file mode 100644
index 000000000000..5639353d47b9
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -0,0 +1,1870 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *	Eunchul Kim <chulspro.kim@samsung.com>
+ *	Jinyoung Jeon <jy0.jeon@samsung.com>
+ *	Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-gsc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_gsc.h"
+
+/*
+ * GSC stands for General SCaler and
+ * supports image scaling/rotation and input/output DMA operations.
+ * Input DMA reads image data from memory.
+ * Output DMA writes image data to memory.
+ * GSC supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> GSC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> GSC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> GSC H/W ----> FIMD, Mixer.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size with, height.
+ * 4. added check_prepare api for right register.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define GSC_MAX_DEVS	4
+#define GSC_MAX_SRC		4
+#define GSC_MAX_DST		16
+#define GSC_RESET_TIMEOUT	50
+#define GSC_BUF_STOP	1
+#define GSC_BUF_START	2
+#define GSC_REG_SZ		16
+#define GSC_WIDTH_ITU_709	1280
+#define GSC_SC_UP_MAX_RATIO		65536
+#define GSC_SC_DOWN_RATIO_7_8		74898
+#define GSC_SC_DOWN_RATIO_6_8		87381
+#define GSC_SC_DOWN_RATIO_5_8		104857
+#define GSC_SC_DOWN_RATIO_4_8		131072
+#define GSC_SC_DOWN_RATIO_3_8		174762
+#define GSC_SC_DOWN_RATIO_2_8		262144
+#define GSC_REFRESH_MIN	12
+#define GSC_REFRESH_MAX	60
+#define GSC_CROP_MAX	8192
+#define GSC_CROP_MIN	32
+#define GSC_SCALE_MAX	4224
+#define GSC_SCALE_MIN	32
+#define GSC_COEF_RATIO	7
+#define GSC_COEF_PHASE	9
+#define GSC_COEF_ATTR	16
+#define GSC_COEF_H_8T	8
+#define GSC_COEF_V_4T	4
+#define GSC_COEF_DEPTH	3
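
/*
 * The GSC_SC_*_RATIO limits above are 16.16 fixed point scale factors,
 * ratio = (src << 16) / dst: 65536 is 1:1, 74898 is roughly 8:7
 * (65536 * 8 / 7) and 262144 is 4:1. For example, scaling a 1920 pixel
 * wide source to 1280 pixels gives (1920 << 16) / 1280 = 98304, which
 * falls in the 87381 < ratio <= 104857 (~8:5) coefficient bucket below.
 */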
+
+#define get_gsc_context(dev)	platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv)	container_of(ippdrv,\
+					struct gsc_context, ippdrv);
+#define gsc_read(offset)		readl(ctx->regs + (offset))
+#define gsc_write(cfg, offset)	writel(cfg, ctx->regs + (offset))
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @pre_shfactor: pre-scaler shift factor.
+ * @pre_hratio: horizontal ratio of the prescaler.
+ * @pre_vratio: vertical ratio of the prescaler.
+ * @main_hratio: the main scaler's horizontal ratio.
+ * @main_vratio: the main scaler's vertical ratio.
+ */
+struct gsc_scaler {
+	bool	range;
+	u32	pre_shfactor;
+	u32	pre_hratio;
+	u32	pre_vratio;
+	unsigned long main_hratio;
+	unsigned long main_vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * see user manual section 49.2 for the supported features.
+ * @tile_w: tile mode or rotation width.
+ * @tile_h: tile mode or rotation height.
+ * @w: other cases width.
+ * @h: other cases height.
+ */
+struct gsc_capability {
+	/* tile or rotation */
+	u32	tile_w;
+	u32	tile_h;
+	/* other cases */
+	u32	w;
+	u32	h;
+};
+
+/*
+ * A structure of gsc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @gsc_clk: gsc gate clock.
+ * @sc: scaler information.
+ * @id: gsc id.
+ * @irq: irq number.
+ * @rotation: supports rotation of src.
+ * @suspended: whether the device is suspended, used for qos operations.
+ */
+struct gsc_context {
+	struct exynos_drm_ippdrv	ippdrv;
+	struct resource	*regs_res;
+	void __iomem	*regs;
+	struct mutex	lock;
+	struct clk	*gsc_clk;
+	struct gsc_scaler	sc;
+	int	id;
+	int	irq;
+	bool	rotation;
+	bool	suspended;
+};
+
+/* 8-tap Filter Coefficient */
+static const int h_coef_8t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_H_8T] = {
+	{	/* Ratio <= 65536 (~8:8) */
+		{  0,  0,   0, 128,   0,   0,  0,  0 },
+		{ -1,  2,  -6, 127,   7,  -2,  1,  0 },
+		{ -1,  4, -12, 125,  16,  -5,  1,  0 },
+		{ -1,  5, -15, 120,  25,  -8,  2,  0 },
+		{ -1,  6, -18, 114,  35, -10,  3, -1 },
+		{ -1,  6, -20, 107,  46, -13,  4, -1 },
+		{ -2,  7, -21,  99,  57, -16,  5, -1 },
+		{ -1,  6, -20,  89,  68, -18,  5, -1 },
+		{ -1,  6, -20,  79,  79, -20,  6, -1 },
+		{ -1,  5, -18,  68,  89, -20,  6, -1 },
+		{ -1,  5, -16,  57,  99, -21,  7, -2 },
+		{ -1,  4, -13,  46, 107, -20,  6, -1 },
+		{ -1,  3, -10,  35, 114, -18,  6, -1 },
+		{  0,  2,  -8,  25, 120, -15,  5, -1 },
+		{  0,  1,  -5,  16, 125, -12,  4, -1 },
+		{  0,  1,  -2,   7, 127,  -6,  2, -1 }
+	}, {	/* 65536 < Ratio <= 74898 (~8:7) */
+		{  3, -8,  14, 111,  13,  -8,  3,  0 },
+		{  2, -6,   7, 112,  21, -10,  3, -1 },
+		{  2, -4,   1, 110,  28, -12,  4, -1 },
+		{  1, -2,  -3, 106,  36, -13,  4, -1 },
+		{  1, -1,  -7, 103,  44, -15,  4, -1 },
+		{  1,  1, -11,  97,  53, -16,  4, -1 },
+		{  0,  2, -13,  91,  61, -16,  4, -1 },
+		{  0,  3, -15,  85,  69, -17,  4, -1 },
+		{  0,  3, -16,  77,  77, -16,  3,  0 },
+		{ -1,  4, -17,  69,  85, -15,  3,  0 },
+		{ -1,  4, -16,  61,  91, -13,  2,  0 },
+		{ -1,  4, -16,  53,  97, -11,  1,  1 },
+		{ -1,  4, -15,  44, 103,  -7, -1,  1 },
+		{ -1,  4, -13,  36, 106,  -3, -2,  1 },
+		{ -1,  4, -12,  28, 110,   1, -4,  2 },
+		{ -1,  3, -10,  21, 112,   7, -6,  2 }
+	}, {	/* 74898 < Ratio <= 87381 (~8:6) */
+		{ 2, -11,  25,  96, 25, -11,   2,  0 },
+		{ 2, -10,  19,  96, 31, -12,   2,  0 },
+		{ 2,  -9,  14,  94, 37, -12,   2,  0 },
+		{ 2,  -8,  10,  92, 43, -12,   1,  0 },
+		{ 2,  -7,   5,  90, 49, -12,   1,  0 },
+		{ 2,  -5,   1,  86, 55, -12,   0,  1 },
+		{ 2,  -4,  -2,  82, 61, -11,  -1,  1 },
+		{ 1,  -3,  -5,  77, 67,  -9,  -1,  1 },
+		{ 1,  -2,  -7,  72, 72,  -7,  -2,  1 },
+		{ 1,  -1,  -9,  67, 77,  -5,  -3,  1 },
+		{ 1,  -1, -11,  61, 82,  -2,  -4,  2 },
+		{ 1,   0, -12,  55, 86,   1,  -5,  2 },
+		{ 0,   1, -12,  49, 90,   5,  -7,  2 },
+		{ 0,   1, -12,  43, 92,  10,  -8,  2 },
+		{ 0,   2, -12,  37, 94,  14,  -9,  2 },
+		{ 0,   2, -12,  31, 96,  19, -10,  2 }
+	}, {	/* 87381 < Ratio <= 104857 (~8:5) */
+		{ -1,  -8, 33,  80, 33,  -8,  -1,  0 },
+		{ -1,  -8, 28,  80, 37,  -7,  -2,  1 },
+		{  0,  -8, 24,  79, 41,  -7,  -2,  1 },
+		{  0,  -8, 20,  78, 46,  -6,  -3,  1 },
+		{  0,  -8, 16,  76, 50,  -4,  -3,  1 },
+		{  0,  -7, 13,  74, 54,  -3,  -4,  1 },
+		{  1,  -7, 10,  71, 58,  -1,  -5,  1 },
+		{  1,  -6,  6,  68, 62,   1,  -5,  1 },
+		{  1,  -6,  4,  65, 65,   4,  -6,  1 },
+		{  1,  -5,  1,  62, 68,   6,  -6,  1 },
+		{  1,  -5, -1,  58, 71,  10,  -7,  1 },
+		{  1,  -4, -3,  54, 74,  13,  -7,  0 },
+		{  1,  -3, -4,  50, 76,  16,  -8,  0 },
+		{  1,  -3, -6,  46, 78,  20,  -8,  0 },
+		{  1,  -2, -7,  41, 79,  24,  -8,  0 },
+		{  1,  -2, -7,  37, 80,  28,  -8, -1 }
+	}, {	/* 104857 < Ratio <= 131072 (~8:4) */
+		{ -3,   0, 35,  64, 35,   0,  -3,  0 },
+		{ -3,  -1, 32,  64, 38,   1,  -3,  0 },
+		{ -2,  -2, 29,  63, 41,   2,  -3,  0 },
+		{ -2,  -3, 27,  63, 43,   4,  -4,  0 },
+		{ -2,  -3, 24,  61, 46,   6,  -4,  0 },
+		{ -2,  -3, 21,  60, 49,   7,  -4,  0 },
+		{ -1,  -4, 19,  59, 51,   9,  -4, -1 },
+		{ -1,  -4, 16,  57, 53,  12,  -4, -1 },
+		{ -1,  -4, 14,  55, 55,  14,  -4, -1 },
+		{ -1,  -4, 12,  53, 57,  16,  -4, -1 },
+		{ -1,  -4,  9,  51, 59,  19,  -4, -1 },
+		{  0,  -4,  7,  49, 60,  21,  -3, -2 },
+		{  0,  -4,  6,  46, 61,  24,  -3, -2 },
+		{  0,  -4,  4,  43, 63,  27,  -3, -2 },
+		{  0,  -3,  2,  41, 63,  29,  -2, -2 },
+		{  0,  -3,  1,  38, 64,  32,  -1, -3 }
+	}, {	/* 131072 < Ratio <= 174762 (~8:3) */
+		{ -1,   8, 33,  48, 33,   8,  -1,  0 },
+		{ -1,   7, 31,  49, 35,   9,  -1, -1 },
+		{ -1,   6, 30,  49, 36,  10,  -1, -1 },
+		{ -1,   5, 28,  48, 38,  12,  -1, -1 },
+		{ -1,   4, 26,  48, 39,  13,   0, -1 },
+		{ -1,   3, 24,  47, 41,  15,   0, -1 },
+		{ -1,   2, 23,  47, 42,  16,   0, -1 },
+		{ -1,   2, 21,  45, 43,  18,   1, -1 },
+		{ -1,   1, 19,  45, 45,  19,   1, -1 },
+		{ -1,   1, 18,  43, 45,  21,   2, -1 },
+		{ -1,   0, 16,  42, 47,  23,   2, -1 },
+		{ -1,   0, 15,  41, 47,  24,   3, -1 },
+		{ -1,   0, 13,  39, 48,  26,   4, -1 },
+		{ -1,  -1, 12,  38, 48,  28,   5, -1 },
+		{ -1,  -1, 10,  36, 49,  30,   6, -1 },
+		{ -1,  -1,  9,  35, 49,  31,   7, -1 }
+	}, {	/* 174762 < Ratio <= 262144 (~8:2) */
+		{  2,  13, 30,  38, 30,  13,   2,  0 },
+		{  2,  12, 29,  38, 30,  14,   3,  0 },
+		{  2,  11, 28,  38, 31,  15,   3,  0 },
+		{  2,  10, 26,  38, 32,  16,   4,  0 },
+		{  1,  10, 26,  37, 33,  17,   4,  0 },
+		{  1,   9, 24,  37, 34,  18,   5,  0 },
+		{  1,   8, 24,  37, 34,  19,   5,  0 },
+		{  1,   7, 22,  36, 35,  20,   6,  1 },
+		{  1,   6, 21,  36, 36,  21,   6,  1 },
+		{  1,   6, 20,  35, 36,  22,   7,  1 },
+		{  0,   5, 19,  34, 37,  24,   8,  1 },
+		{  0,   5, 18,  34, 37,  24,   9,  1 },
+		{  0,   4, 17,  33, 37,  26,  10,  1 },
+		{  0,   4, 16,  32, 38,  26,  10,  2 },
+		{  0,   3, 15,  31, 38,  28,  11,  2 },
+		{  0,   3, 14,  30, 38,  29,  12,  2 }
+	}
+};
+
+/* 4-tap Filter Coefficient */
+static const int v_coef_4t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_V_4T] = {
+	{	/* Ratio <= 65536 (~8:8) */
+		{  0, 128,   0,  0 },
+		{ -4, 127,   5,  0 },
+		{ -6, 124,  11, -1 },
+		{ -8, 118,  19, -1 },
+		{ -8, 111,  27, -2 },
+		{ -8, 102,  37, -3 },
+		{ -8,  92,  48, -4 },
+		{ -7,  81,  59, -5 },
+		{ -6,  70,  70, -6 },
+		{ -5,  59,  81, -7 },
+		{ -4,  48,  92, -8 },
+		{ -3,  37, 102, -8 },
+		{ -2,  27, 111, -8 },
+		{ -1,  19, 118, -8 },
+		{ -1,  11, 124, -6 },
+		{  0,   5, 127, -4 }
+	}, {	/* 65536 < Ratio <= 74898 (~8:7) */
+		{  8, 112,   8,  0 },
+		{  4, 111,  14, -1 },
+		{  1, 109,  20, -2 },
+		{ -2, 105,  27, -2 },
+		{ -3, 100,  34, -3 },
+		{ -5,  93,  43, -3 },
+		{ -5,  86,  51, -4 },
+		{ -5,  77,  60, -4 },
+		{ -5,  69,  69, -5 },
+		{ -4,  60,  77, -5 },
+		{ -4,  51,  86, -5 },
+		{ -3,  43,  93, -5 },
+		{ -3,  34, 100, -3 },
+		{ -2,  27, 105, -2 },
+		{ -2,  20, 109,  1 },
+		{ -1,  14, 111,  4 }
+	}, {	/* 74898 < Ratio <= 87381 (~8:6) */
+		{ 16,  96,  16,  0 },
+		{ 12,  97,  21, -2 },
+		{  8,  96,  26, -2 },
+		{  5,  93,  32, -2 },
+		{  2,  89,  39, -2 },
+		{  0,  84,  46, -2 },
+		{ -1,  79,  53, -3 },
+		{ -2,  73,  59, -2 },
+		{ -2,  66,  66, -2 },
+		{ -2,  59,  73, -2 },
+		{ -3,  53,  79, -1 },
+		{ -2,  46,  84,  0 },
+		{ -2,  39,  89,  2 },
+		{ -2,  32,  93,  5 },
+		{ -2,  26,  96,  8 },
+		{ -2,  21,  97, 12 }
+	}, {	/* 87381 < Ratio <= 104857 (~8:5) */
+		{ 22,  84,  22,  0 },
+		{ 18,  85,  26, -1 },
+		{ 14,  84,  31, -1 },
+		{ 11,  82,  36, -1 },
+		{  8,  79,  42, -1 },
+		{  6,  76,  47, -1 },
+		{  4,  72,  52,  0 },
+		{  2,  68,  58,  0 },
+		{  1,  63,  63,  1 },
+		{  0,  58,  68,  2 },
+		{  0,  52,  72,  4 },
+		{ -1,  47,  76,  6 },
+		{ -1,  42,  79,  8 },
+		{ -1,  36,  82, 11 },
+		{ -1,  31,  84, 14 },
+		{ -1,  26,  85, 18 }
+	}, {	/* 104857 < Ratio <= 131072 (~8:4) */
+		{ 26,  76,  26,  0 },
+		{ 22,  76,  30,  0 },
+		{ 19,  75,  34,  0 },
+		{ 16,  73,  38,  1 },
+		{ 13,  71,  43,  1 },
+		{ 10,  69,  47,  2 },
+		{  8,  66,  51,  3 },
+		{  6,  63,  55,  4 },
+		{  5,  59,  59,  5 },
+		{  4,  55,  63,  6 },
+		{  3,  51,  66,  8 },
+		{  2,  47,  69, 10 },
+		{  1,  43,  71, 13 },
+		{  1,  38,  73, 16 },
+		{  0,  34,  75, 19 },
+		{  0,  30,  76, 22 }
+	}, {	/* 131072 < Ratio <= 174762 (~8:3) */
+		{ 29,  70,  29,  0 },
+		{ 26,  68,  32,  2 },
+		{ 23,  67,  36,  2 },
+		{ 20,  66,  39,  3 },
+		{ 17,  65,  43,  3 },
+		{ 15,  63,  46,  4 },
+		{ 12,  61,  50,  5 },
+		{ 10,  58,  53,  7 },
+		{  8,  56,  56,  8 },
+		{  7,  53,  58, 10 },
+		{  5,  50,  61, 12 },
+		{  4,  46,  63, 15 },
+		{  3,  43,  65, 17 },
+		{  3,  39,  66, 20 },
+		{  2,  36,  67, 23 },
+		{  2,  32,  68, 26 }
+	}, {	/* 174762 < Ratio <= 262144 (~8:2) */
+		{ 32,  64,  32,  0 },
+		{ 28,  63,  34,  3 },
+		{ 25,  62,  37,  4 },
+		{ 22,  62,  40,  4 },
+		{ 19,  61,  43,  5 },
+		{ 17,  59,  46,  6 },
+		{ 15,  58,  48,  7 },
+		{ 13,  55,  51,  9 },
+		{ 11,  53,  53, 11 },
+		{  9,  51,  55, 13 },
+		{  7,  48,  58, 15 },
+		{  6,  46,  59, 17 },
+		{  5,  43,  61, 19 },
+		{  4,  40,  62, 22 },
+		{  4,  37,  62, 25 },
+		{  3,  34,  63, 28 }
+	}
+};
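
/*
 * Both coefficient tables hold one set of filter taps per ratio bucket
 * and per 1/16th phase; the taps of every phase sum to 128, i.e. unity
 * gain in steps of 1/128.
 */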
+
+static int gsc_sw_reset(struct gsc_context *ctx)
+{
+	u32 cfg;
+	int count = GSC_RESET_TIMEOUT;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* s/w reset */
+	cfg = (GSC_SW_RESET_SRESET);
+	gsc_write(cfg, GSC_SW_RESET);
+
+	/* wait s/w reset complete */
+	while (count--) {
+		cfg = gsc_read(GSC_SW_RESET);
+		if (!cfg)
+			break;
+		usleep_range(1000, 2000);
+	}
+
+	if (cfg) {
+		DRM_ERROR("failed to reset gsc h/w.\n");
+		return -EBUSY;
+	}
+
+	/* reset sequence */
+	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+	cfg |= (GSC_IN_BASE_ADDR_MASK |
+		GSC_IN_BASE_ADDR_PINGPONG(0));
+	gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+	gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+	gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+	cfg |= (GSC_OUT_BASE_ADDR_MASK |
+		GSC_OUT_BASE_ADDR_PINGPONG(0));
+	gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+	gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+	gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+	return 0;
+}
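
/*
 * After the soft reset the base-address mask registers above leave all
 * buffer slots masked, so buffers have to be re-enqueued before the
 * engine is started again.
 */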
+
+static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
+{
+	u32 gscblk_cfg;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
+
+	if (enable)
+		gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
+				GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
+				GSC_BLK_SW_RESET_WB_DEST(ctx->id);
+	else
+		gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
+
+	writel(gscblk_cfg, SYSREG_GSCBLK_CFG1);
+}
+
+static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
+		bool overflow, bool done)
+{
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+			enable, overflow, done);
+
+	cfg = gsc_read(GSC_IRQ);
+	cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
+
+	if (enable)
+		cfg |= GSC_IRQ_ENABLE;
+	else
+		cfg &= ~GSC_IRQ_ENABLE;
+
+	if (overflow)
+		cfg &= ~GSC_IRQ_OR_MASK;
+	else
+		cfg |= GSC_IRQ_OR_MASK;
+
+	if (done)
+		cfg &= ~GSC_IRQ_FRMDONE_MASK;
+	else
+		cfg |= GSC_IRQ_FRMDONE_MASK;
+
+	gsc_write(cfg, GSC_IRQ);
+}
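
/*
 * Note the inverted sense of the two *_MASK bits: a set bit masks
 * (disables) the corresponding interrupt source, so passing overflow/done
 * as true clears the bit to let that interrupt through.
 */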
+
+static int gsc_src_set_fmt(struct device *dev, u32 fmt)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+	cfg = gsc_read(GSC_IN_CON);
+	cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
+		 GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
+		 GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE |
+		 GSC_IN_CHROM_STRIDE_SEL_MASK | GSC_IN_RB_SWAP_MASK);
+
+	switch (fmt) {
+	case DRM_FORMAT_RGB565:
+		cfg |= GSC_IN_RGB565;
+		break;
+	case DRM_FORMAT_XRGB8888:
+		cfg |= GSC_IN_XRGB8888;
+		break;
+	case DRM_FORMAT_BGRX8888:
+		cfg |= (GSC_IN_XRGB8888 | GSC_IN_RB_SWAP);
+		break;
+	case DRM_FORMAT_YUYV:
+		cfg |= (GSC_IN_YUV422_1P |
+			GSC_IN_YUV422_1P_ORDER_LSB_Y |
+			GSC_IN_CHROMA_ORDER_CBCR);
+		break;
+	case DRM_FORMAT_YVYU:
+		cfg |= (GSC_IN_YUV422_1P |
+			GSC_IN_YUV422_1P_ORDER_LSB_Y |
+			GSC_IN_CHROMA_ORDER_CRCB);
+		break;
+	case DRM_FORMAT_UYVY:
+		cfg |= (GSC_IN_YUV422_1P |
+			GSC_IN_YUV422_1P_OEDER_LSB_C |
+			GSC_IN_CHROMA_ORDER_CBCR);
+		break;
+	case DRM_FORMAT_VYUY:
+		cfg |= (GSC_IN_YUV422_1P |
+			GSC_IN_YUV422_1P_OEDER_LSB_C |
+			GSC_IN_CHROMA_ORDER_CRCB);
+		break;
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV61:
+		cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
+			GSC_IN_YUV420_2P);
+		break;
+	case DRM_FORMAT_YUV422:
+		cfg |= GSC_IN_YUV422_3P;
+		break;
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+		cfg |= GSC_IN_YUV420_3P;
+		break;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV16:
+		cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
+			GSC_IN_YUV420_2P);
+		break;
+	case DRM_FORMAT_NV12MT:
+		cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
+		break;
+	default:
+		dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+		return -EINVAL;
+	}
+
+	gsc_write(cfg, GSC_IN_CON);
+
+	return 0;
+}
+
+static int gsc_src_set_transf(struct device *dev,
+		enum drm_exynos_degree degree,
+		enum drm_exynos_flip flip, bool *swap)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+		degree, flip);
+
+	cfg = gsc_read(GSC_IN_CON);
+	cfg &= ~GSC_IN_ROT_MASK;
+
+	switch (degree) {
+	case EXYNOS_DRM_DEGREE_0:
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg |= GSC_IN_ROT_XFLIP;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg |= GSC_IN_ROT_YFLIP;
+		break;
+	case EXYNOS_DRM_DEGREE_90:
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg |= GSC_IN_ROT_90_XFLIP;
+		else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg |= GSC_IN_ROT_90_YFLIP;
+		else
+			cfg |= GSC_IN_ROT_90;
+		break;
+	case EXYNOS_DRM_DEGREE_180:
+		cfg |= GSC_IN_ROT_180;
+		break;
+	case EXYNOS_DRM_DEGREE_270:
+		cfg |= GSC_IN_ROT_270;
+		break;
+	default:
+		dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+		return -EINVAL;
+	}
+
+	gsc_write(cfg, GSC_IN_CON);
+
+	ctx->rotation = cfg &
+		(GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+	*swap = ctx->rotation;
+
+	return 0;
+}
+
+static int gsc_src_set_size(struct device *dev, int swap,
+		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct drm_exynos_pos img_pos = *pos;
+	struct gsc_scaler *sc = &ctx->sc;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+		__func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+	if (swap) {
+		img_pos.w = pos->h;
+		img_pos.h = pos->w;
+	}
+
+	/* pixel offset */
+	cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) |
+		GSC_SRCIMG_OFFSET_Y(img_pos.y));
+	gsc_write(cfg, GSC_SRCIMG_OFFSET);
+
+	/* cropped size */
+	cfg = (GSC_CROPPED_WIDTH(img_pos.w) |
+		GSC_CROPPED_HEIGHT(img_pos.h));
+	gsc_write(cfg, GSC_CROPPED_SIZE);
+
+	DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+		__func__, sz->hsize, sz->vsize);
+
+	/* original size */
+	cfg = gsc_read(GSC_SRCIMG_SIZE);
+	cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
+		GSC_SRCIMG_WIDTH_MASK);
+
+	cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) |
+		GSC_SRCIMG_HEIGHT(sz->vsize));
+
+	gsc_write(cfg, GSC_SRCIMG_SIZE);
+
+	cfg = gsc_read(GSC_IN_CON);
+	cfg &= ~GSC_IN_RGB_TYPE_MASK;
+
+	DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+		__func__, pos->w, sc->range);
+
+	if (pos->w >= GSC_WIDTH_ITU_709)
+		if (sc->range)
+			cfg |= GSC_IN_RGB_HD_WIDE;
+		else
+			cfg |= GSC_IN_RGB_HD_NARROW;
+	else
+		if (sc->range)
+			cfg |= GSC_IN_RGB_SD_WIDE;
+		else
+			cfg |= GSC_IN_RGB_SD_NARROW;
+
+	gsc_write(cfg, GSC_IN_CON);
+
+	return 0;
+}
+
+static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+		enum drm_exynos_ipp_buf_type buf_type)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	bool masked;
+	u32 cfg;
+	u32 mask = 0x00000001 << buf_id;
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+		buf_id, buf_type);
+
+	/* mask register set */
+	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+
+	switch (buf_type) {
+	case IPP_BUF_ENQUEUE:
+		masked = false;
+		break;
+	case IPP_BUF_DEQUEUE:
+		masked = true;
+		break;
+	default:
+		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+		return -EINVAL;
+	}
+
+	/* sequence id */
+	cfg &= ~mask;
+	cfg |= masked << buf_id;
+	gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+	gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+	gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+	return 0;
+}
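
/*
 * Each bit of the base-address mask registers corresponds to one
 * ping-pong buffer slot: enqueuing clears the bit so the slot takes part
 * in the rotation, dequeuing sets it to take the slot out again.
 */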
+
+static int gsc_src_set_addr(struct device *dev,
+		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+		enum drm_exynos_ipp_buf_type buf_type)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+	struct drm_exynos_ipp_property *property;
+
+	if (!c_node) {
+		DRM_ERROR("failed to get c_node.\n");
+		return -EFAULT;
+	}
+
+	property = &c_node->property;
+	if (!property) {
+		DRM_ERROR("failed to get property.\n");
+		return -EFAULT;
+	}
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+		property->prop_id, buf_id, buf_type);
+
+	if (buf_id > GSC_MAX_SRC) {
+		dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
+		return -EINVAL;
+	}
+
+	/* address register set */
+	switch (buf_type) {
+	case IPP_BUF_ENQUEUE:
+		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+			GSC_IN_BASE_ADDR_Y(buf_id));
+		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+			GSC_IN_BASE_ADDR_CB(buf_id));
+		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+			GSC_IN_BASE_ADDR_CR(buf_id));
+		break;
+	case IPP_BUF_DEQUEUE:
+		gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id));
+		gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id));
+		gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id));
+		break;
+	default:
+		/* bypass */
+		break;
+	}
+
+	return gsc_src_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_src_ops = {
+	.set_fmt = gsc_src_set_fmt,
+	.set_transf = gsc_src_set_transf,
+	.set_size = gsc_src_set_size,
+	.set_addr = gsc_src_set_addr,
+};
+
+static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+	cfg = gsc_read(GSC_OUT_CON);
+	cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
+		 GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
+		 GSC_OUT_CHROM_STRIDE_SEL_MASK | GSC_OUT_RB_SWAP_MASK |
+		 GSC_OUT_GLOBAL_ALPHA_MASK);
+
+	switch (fmt) {
+	case DRM_FORMAT_RGB565:
+		cfg |= GSC_OUT_RGB565;
+		break;
+	case DRM_FORMAT_XRGB8888:
+		cfg |= GSC_OUT_XRGB8888;
+		break;
+	case DRM_FORMAT_BGRX8888:
+		cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
+		break;
+	case DRM_FORMAT_YUYV:
+		cfg |= (GSC_OUT_YUV422_1P |
+			GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+			GSC_OUT_CHROMA_ORDER_CBCR);
+		break;
+	case DRM_FORMAT_YVYU:
+		cfg |= (GSC_OUT_YUV422_1P |
+			GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+			GSC_OUT_CHROMA_ORDER_CRCB);
+		break;
+	case DRM_FORMAT_UYVY:
+		cfg |= (GSC_OUT_YUV422_1P |
+			GSC_OUT_YUV422_1P_OEDER_LSB_C |
+			GSC_OUT_CHROMA_ORDER_CBCR);
+		break;
+	case DRM_FORMAT_VYUY:
+		cfg |= (GSC_OUT_YUV422_1P |
+			GSC_OUT_YUV422_1P_OEDER_LSB_C |
+			GSC_OUT_CHROMA_ORDER_CRCB);
+		break;
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV61:
+		cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
+		break;
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+		cfg |= GSC_OUT_YUV420_3P;
+		break;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV16:
+		cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
+			GSC_OUT_YUV420_2P);
+		break;
+	case DRM_FORMAT_NV12MT:
+		cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE);
+		break;
+	default:
+		dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+		return -EINVAL;
+	}
+
+	gsc_write(cfg, GSC_OUT_CON);
+
+	return 0;
+}
+
+static int gsc_dst_set_transf(struct device *dev,
+		enum drm_exynos_degree degree,
+		enum drm_exynos_flip flip, bool *swap)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+		degree, flip);
+
+	cfg = gsc_read(GSC_IN_CON);
+	cfg &= ~GSC_IN_ROT_MASK;
+
+	switch (degree) {
+	case EXYNOS_DRM_DEGREE_0:
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg |= GSC_IN_ROT_XFLIP;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg |= GSC_IN_ROT_YFLIP;
+		break;
+	case EXYNOS_DRM_DEGREE_90:
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg |= GSC_IN_ROT_90_XFLIP;
+		else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg |= GSC_IN_ROT_90_YFLIP;
+		else
+			cfg |= GSC_IN_ROT_90;
+		break;
+	case EXYNOS_DRM_DEGREE_180:
+		cfg |= GSC_IN_ROT_180;
+		break;
+	case EXYNOS_DRM_DEGREE_270:
+		cfg |= GSC_IN_ROT_270;
+		break;
+	default:
+		dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+		return -EINVAL;
+	}
+
+	gsc_write(cfg, GSC_IN_CON);
+
+	ctx->rotation = cfg &
+		(GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+	*swap = ctx->rotation;
+
+	return 0;
+}
+
+static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
+{
+	DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+	if (src >= dst * 8) {
+		DRM_ERROR("failed to make ratio and shift.\n");
+		return -EINVAL;
+	} else if (src >= dst * 4)
+		*ratio = 4;
+	else if (src >= dst * 2)
+		*ratio = 2;
+	else
+		*ratio = 1;
+
+	return 0;
+}
+
+static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
+{
+	if (hratio == 4 && vratio == 4)
+		*shfactor = 4;
+	else if ((hratio == 4 && vratio == 2) ||
+		 (hratio == 2 && vratio == 4))
+		*shfactor = 3;
+	else if ((hratio == 4 && vratio == 1) ||
+		 (hratio == 1 && vratio == 4) ||
+		 (hratio == 2 && vratio == 2))
+		*shfactor = 2;
+	else if (hratio == 1 && vratio == 1)
+		*shfactor = 0;
+	else
+		*shfactor = 1;
+}
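
/*
 * Taken together, gsc_get_ratio_shift() picks a power-of-two prescale of
 * 1, 2 or 4 per axis (rejecting downscales of 8:1 or more) and the shift
 * factor is log2(hratio * vratio): e.g. hratio = 4 and vratio = 2 give
 * 4 * 2 = 8 and a shift factor of 3.
 */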
+
+static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
+		struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+	u32 src_w, src_h, dst_w, dst_h;
+	int ret = 0;
+
+	src_w = src->w;
+	src_h = src->h;
+
+	if (ctx->rotation) {
+		dst_w = dst->h;
+		dst_h = dst->w;
+	} else {
+		dst_w = dst->w;
+		dst_h = dst->h;
+	}
+
+	ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
+	if (ret) {
+		dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+		return ret;
+	}
+
+	ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
+	if (ret) {
+		dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+		return ret;
+	}
+
+	DRM_DEBUG_KMS("%s:pre_hratio[%d]pre_vratio[%d]\n",
+		__func__, sc->pre_hratio, sc->pre_vratio);
+
+	sc->main_hratio = (src_w << 16) / dst_w;
+	sc->main_vratio = (src_h << 16) / dst_h;
+
+	DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+		__func__, sc->main_hratio, sc->main_vratio);
+
+	gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
+		&sc->pre_shfactor);
+
+	DRM_DEBUG_KMS("%s:pre_shfactor[%d]\n", __func__,
+		sc->pre_shfactor);
+
+	cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
+		GSC_PRESC_H_RATIO(sc->pre_hratio) |
+		GSC_PRESC_V_RATIO(sc->pre_vratio));
+	gsc_write(cfg, GSC_PRE_SCALE_RATIO);
+
+	return ret;
+}
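
/*
 * Worked example (hypothetical 1920x1080 -> 640x540 M2M job without
 * rotation): pre_hratio = 2 (1920 >= 2 * 640), pre_vratio = 2
 * (1080 >= 2 * 540), hence pre_shfactor = 2; main_hratio =
 * (1920 << 16) / 640 = 196608 and main_vratio = (1080 << 16) / 540 =
 * 131072.
 */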
+
+static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio)
+{
+	int i, j, k, sc_ratio;
+
+	if (main_hratio <= GSC_SC_UP_MAX_RATIO)
+		sc_ratio = 0;
+	else if (main_hratio <= GSC_SC_DOWN_RATIO_7_8)
+		sc_ratio = 1;
+	else if (main_hratio <= GSC_SC_DOWN_RATIO_6_8)
+		sc_ratio = 2;
+	else if (main_hratio <= GSC_SC_DOWN_RATIO_5_8)
+		sc_ratio = 3;
+	else if (main_hratio <= GSC_SC_DOWN_RATIO_4_8)
+		sc_ratio = 4;
+	else if (main_hratio <= GSC_SC_DOWN_RATIO_3_8)
+		sc_ratio = 5;
+	else
+		sc_ratio = 6;
+
+	for (i = 0; i < GSC_COEF_PHASE; i++)
+		for (j = 0; j < GSC_COEF_H_8T; j++)
+			for (k = 0; k < GSC_COEF_DEPTH; k++)
+				gsc_write(h_coef_8t[sc_ratio][i][j],
+					GSC_HCOEF(i, j, k));
+}
+
+static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio)
+{
+	int i, j, k, sc_ratio;
+
+	if (main_vratio <= GSC_SC_UP_MAX_RATIO)
+		sc_ratio = 0;
+	else if (main_vratio <= GSC_SC_DOWN_RATIO_7_8)
+		sc_ratio = 1;
+	else if (main_vratio <= GSC_SC_DOWN_RATIO_6_8)
+		sc_ratio = 2;
+	else if (main_vratio <= GSC_SC_DOWN_RATIO_5_8)
+		sc_ratio = 3;
+	else if (main_vratio <= GSC_SC_DOWN_RATIO_4_8)
+		sc_ratio = 4;
+	else if (main_vratio <= GSC_SC_DOWN_RATIO_3_8)
+		sc_ratio = 5;
+	else
+		sc_ratio = 6;
+
+	for (i = 0; i < GSC_COEF_PHASE; i++)
+		for (j = 0; j < GSC_COEF_V_4T; j++)
+			for (k = 0; k < GSC_COEF_DEPTH; k++)
+				gsc_write(v_coef_4t[sc_ratio][i][j],
+					GSC_VCOEF(i, j, k));
+}
+
+static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
+{
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+		__func__, sc->main_hratio, sc->main_vratio);
+
+	gsc_set_h_coef(ctx, sc->main_hratio);
+	cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
+	gsc_write(cfg, GSC_MAIN_H_RATIO);
+
+	gsc_set_v_coef(ctx, sc->main_vratio);
+	cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
+	gsc_write(cfg, GSC_MAIN_V_RATIO);
+}
+
+static int gsc_dst_set_size(struct device *dev, int swap,
+		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct drm_exynos_pos img_pos = *pos;
+	struct gsc_scaler *sc = &ctx->sc;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+		__func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+	if (swap) {
+		img_pos.w = pos->h;
+		img_pos.h = pos->w;
+	}
+
+	/* pixel offset */
+	cfg = (GSC_DSTIMG_OFFSET_X(pos->x) |
+		GSC_DSTIMG_OFFSET_Y(pos->y));
+	gsc_write(cfg, GSC_DSTIMG_OFFSET);
+
+	/* scaled size */
+	cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h));
+	gsc_write(cfg, GSC_SCALED_SIZE);
+
+	DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+		__func__, sz->hsize, sz->vsize);
+
+	/* original size */
+	cfg = gsc_read(GSC_DSTIMG_SIZE);
+	cfg &= ~(GSC_DSTIMG_HEIGHT_MASK |
+		GSC_DSTIMG_WIDTH_MASK);
+	cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) |
+		GSC_DSTIMG_HEIGHT(sz->vsize));
+	gsc_write(cfg, GSC_DSTIMG_SIZE);
+
+	cfg = gsc_read(GSC_OUT_CON);
+	cfg &= ~GSC_OUT_RGB_TYPE_MASK;
+
+	DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+		__func__, pos->w, sc->range);
+
+	if (pos->w >= GSC_WIDTH_ITU_709)
+		if (sc->range)
+			cfg |= GSC_OUT_RGB_HD_WIDE;
+		else
+			cfg |= GSC_OUT_RGB_HD_NARROW;
+	else
+		if (sc->range)
+			cfg |= GSC_OUT_RGB_SD_WIDE;
+		else
+			cfg |= GSC_OUT_RGB_SD_NARROW;
+
+	gsc_write(cfg, GSC_OUT_CON);
+
+	return 0;
+}
+
+static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
+{
+	u32 cfg, i, buf_num = GSC_REG_SZ;
+	u32 mask = 0x00000001;
+
+	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+	for (i = 0; i < GSC_REG_SZ; i++)
+		if (cfg & (mask << i))
+			buf_num--;
+
+	DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+	return buf_num;
+}
+
+static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+		enum drm_exynos_ipp_buf_type buf_type)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	bool masked;
+	u32 cfg;
+	u32 mask = 0x00000001 << buf_id;
+	int ret = 0;
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+		buf_id, buf_type);
+
+	mutex_lock(&ctx->lock);
+
+	/* mask register set */
+	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+	switch (buf_type) {
+	case IPP_BUF_ENQUEUE:
+		masked = false;
+		break;
+	case IPP_BUF_DEQUEUE:
+		masked = true;
+		break;
+	default:
+		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+		ret = -EINVAL;
+		goto err_unlock;
+	}
+
+	/* sequence id */
+	cfg &= ~mask;
+	cfg |= masked << buf_id;
+	gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+	gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+	gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+	/* interrupt enable */
+	if (buf_type == IPP_BUF_ENQUEUE &&
+	    gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
+		gsc_handle_irq(ctx, true, false, true);
+
+	/* interrupt disable */
+	if (buf_type == IPP_BUF_DEQUEUE &&
+	    gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
+		gsc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+	mutex_unlock(&ctx->lock);
+	return ret;
+}
+
+static int gsc_dst_set_addr(struct device *dev,
+		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+		enum drm_exynos_ipp_buf_type buf_type)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+	struct drm_exynos_ipp_property *property;
+
+	if (!c_node) {
+		DRM_ERROR("failed to get c_node.\n");
+		return -EFAULT;
+	}
+
+	property = &c_node->property;
+	if (!property) {
+		DRM_ERROR("failed to get property.\n");
+		return -EFAULT;
+	}
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+		property->prop_id, buf_id, buf_type);
+
+	if (buf_id > GSC_MAX_DST) {
+		dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
+		return -EINVAL;
+	}
+
+	/* address register set */
+	switch (buf_type) {
+	case IPP_BUF_ENQUEUE:
+		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+			GSC_OUT_BASE_ADDR_Y(buf_id));
+		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+			GSC_OUT_BASE_ADDR_CB(buf_id));
+		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+			GSC_OUT_BASE_ADDR_CR(buf_id));
+		break;
+	case IPP_BUF_DEQUEUE:
+		gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id));
+		gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id));
+		gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id));
+		break;
+	default:
+		/* bypass */
+		break;
+	}
+
+	return gsc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_dst_ops = {
+	.set_fmt = gsc_dst_set_fmt,
+	.set_transf = gsc_dst_set_transf,
+	.set_size = gsc_dst_set_size,
+	.set_addr = gsc_dst_set_addr,
+};
+
+static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
+{
+	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+	if (enable) {
+		clk_enable(ctx->gsc_clk);
+		ctx->suspended = false;
+	} else {
+		clk_disable(ctx->gsc_clk);
+		ctx->suspended = true;
+	}
+
+	return 0;
+}
+
+static int gsc_get_src_buf_index(struct gsc_context *ctx)
+{
+	u32 cfg, curr_index, i;
+	u32 buf_id = GSC_MAX_SRC;
+	int ret;
+
+	DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+	curr_index = GSC_IN_CURR_GET_INDEX(cfg);
+
+	for (i = curr_index; i < GSC_MAX_SRC; i++) {
+		if (!((cfg >> i) & 0x1)) {
+			buf_id = i;
+			break;
+		}
+	}
+
+	if (buf_id == GSC_MAX_SRC) {
+		DRM_ERROR("failed to get in buffer index.\n");
+		return -EINVAL;
+	}
+
+	ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+	if (ret < 0) {
+		DRM_ERROR("failed to dequeue.\n");
+		return ret;
+	}
+
+	DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+		curr_index, buf_id);
+
+	return buf_id;
+}
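
/*
 * The buffer just completed is the first still-unmasked slot at or after
 * the hardware's current ping-pong index; it is dequeued (masked) here so
 * the slot can be signalled back to userspace.
 */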
+
+static int gsc_get_dst_buf_index(struct gsc_context *ctx)
+{
+	u32 cfg, curr_index, i;
+	u32 buf_id = GSC_MAX_DST;
+	int ret;
+
+	DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+	curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
+
+	for (i = curr_index; i < GSC_MAX_DST; i++) {
+		if (!((cfg >> i) & 0x1)) {
+			buf_id = i;
+			break;
+		}
+	}
+
+	if (buf_id == GSC_MAX_DST) {
+		DRM_ERROR("failed to get out buffer index.\n");
+		return -EINVAL;
+	}
+
+	ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+	if (ret < 0) {
+		DRM_ERROR("failed to dequeue.\n");
+		return ret;
+	}
+
+	DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+		curr_index, buf_id);
+
+	return buf_id;
+}
+
+static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
+{
+	struct gsc_context *ctx = dev_id;
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+	struct drm_exynos_ipp_event_work *event_work =
+		c_node->event_work;
+	u32 status;
+	int buf_id[EXYNOS_DRM_OPS_MAX];
+
+	DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+	status = gsc_read(GSC_IRQ);
+	if (status & GSC_IRQ_STATUS_OR_IRQ) {
+		dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
+			ctx->id, status);
+		return IRQ_NONE;
+	}
+
+	if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
+		dev_dbg(ippdrv->dev, "occured frame done at %d, status 0x%x.\n",
+			ctx->id, status);
+
+		buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
+		if (buf_id[EXYNOS_DRM_OPS_SRC] < 0)
+			return IRQ_HANDLED;
+
+		buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx);
+		if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
+			return IRQ_HANDLED;
+
+		DRM_DEBUG_KMS("%s:buf_id_src[%d]buf_id_dst[%d]\n", __func__,
+			buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);
+
+		event_work->ippdrv = ippdrv;
+		event_work->buf_id[EXYNOS_DRM_OPS_SRC] =
+			buf_id[EXYNOS_DRM_OPS_SRC];
+		event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+			buf_id[EXYNOS_DRM_OPS_DST];
+		queue_work(ippdrv->event_workq,
+			(struct work_struct *)event_work);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+	struct drm_exynos_ipp_prop_list *prop_list;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+	if (!prop_list) {
+		DRM_ERROR("failed to alloc property list.\n");
+		return -ENOMEM;
+	}
+
+	prop_list->version = 1;
+	prop_list->writeback = 1;
+	prop_list->refresh_min = GSC_REFRESH_MIN;
+	prop_list->refresh_max = GSC_REFRESH_MAX;
+	prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+				(1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+	prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+				(1 << EXYNOS_DRM_DEGREE_90) |
+				(1 << EXYNOS_DRM_DEGREE_180) |
+				(1 << EXYNOS_DRM_DEGREE_270);
+	prop_list->csc = 1;
+	prop_list->crop = 1;
+	prop_list->crop_max.hsize = GSC_CROP_MAX;
+	prop_list->crop_max.vsize = GSC_CROP_MAX;
+	prop_list->crop_min.hsize = GSC_CROP_MIN;
+	prop_list->crop_min.vsize = GSC_CROP_MIN;
+	prop_list->scale = 1;
+	prop_list->scale_max.hsize = GSC_SCALE_MAX;
+	prop_list->scale_max.vsize = GSC_SCALE_MAX;
+	prop_list->scale_min.hsize = GSC_SCALE_MIN;
+	prop_list->scale_min.vsize = GSC_SCALE_MIN;
+
+	ippdrv->prop_list = prop_list;
+
+	return 0;
+}
+
+static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
+{
+	switch (flip) {
+	case EXYNOS_DRM_FLIP_NONE:
+	case EXYNOS_DRM_FLIP_VERTICAL:
+	case EXYNOS_DRM_FLIP_HORIZONTAL:
+	case EXYNOS_DRM_FLIP_VERTICAL | EXYNOS_DRM_FLIP_HORIZONTAL:
+		return true;
+	default:
+		DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+		return false;
+	}
+}
+
+static int gsc_ippdrv_check_property(struct device *dev,
+		struct drm_exynos_ipp_property *property)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+	struct drm_exynos_ipp_config *config;
+	struct drm_exynos_pos *pos;
+	struct drm_exynos_sz *sz;
+	bool swap;
+	int i;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	for_each_ipp_ops(i) {
+		if ((i == EXYNOS_DRM_OPS_SRC) &&
+			(property->cmd == IPP_CMD_WB))
+			continue;
+
+		config = &property->config[i];
+		pos = &config->pos;
+		sz = &config->sz;
+
+		/* check for flip */
+		if (!gsc_check_drm_flip(config->flip)) {
+			DRM_ERROR("invalid flip.\n");
+			goto err_property;
+		}
+
+		/* check for degree */
+		switch (config->degree) {
+		case EXYNOS_DRM_DEGREE_90:
+		case EXYNOS_DRM_DEGREE_270:
+			swap = true;
+			break;
+		case EXYNOS_DRM_DEGREE_0:
+		case EXYNOS_DRM_DEGREE_180:
+			swap = false;
+			break;
+		default:
+			DRM_ERROR("invalid degree.\n");
+			goto err_property;
+		}
+
+		/* check for buffer bound */
+		if ((pos->x + pos->w > sz->hsize) ||
+			(pos->y + pos->h > sz->vsize)) {
+			DRM_ERROR("out of buf bound.\n");
+			goto err_property;
+		}
+
+		/* check for crop */
+		if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+			if (swap) {
+				if ((pos->h < pp->crop_min.hsize) ||
+					(sz->vsize > pp->crop_max.hsize) ||
+					(pos->w < pp->crop_min.vsize) ||
+					(sz->hsize > pp->crop_max.vsize)) {
+					DRM_ERROR("out of crop size.\n");
+					goto err_property;
+				}
+			} else {
+				if ((pos->w < pp->crop_min.hsize) ||
+					(sz->hsize > pp->crop_max.hsize) ||
+					(pos->h < pp->crop_min.vsize) ||
+					(sz->vsize > pp->crop_max.vsize)) {
+					DRM_ERROR("out of crop size.\n");
+					goto err_property;
+				}
+			}
+		}
+
+		/* check for scale */
+		if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+			if (swap) {
+				if ((pos->h < pp->scale_min.hsize) ||
+					(sz->vsize > pp->scale_max.hsize) ||
+					(pos->w < pp->scale_min.vsize) ||
+					(sz->hsize > pp->scale_max.vsize)) {
+					DRM_ERROR("out of scale size.\n");
+					goto err_property;
+				}
+			} else {
+				if ((pos->w < pp->scale_min.hsize) ||
+					(sz->hsize > pp->scale_max.hsize) ||
+					(pos->h < pp->scale_min.vsize) ||
+					(sz->vsize > pp->scale_max.vsize)) {
+					DRM_ERROR("out of scale size.\n");
+					goto err_property;
+				}
+			}
+		}
+	}
+
+	return 0;
+
+err_property:
+	for_each_ipp_ops(i) {
+		if ((i == EXYNOS_DRM_OPS_SRC) &&
+			(property->cmd == IPP_CMD_WB))
+			continue;
+
+		config = &property->config[i];
+		pos = &config->pos;
+		sz = &config->sz;
+
+		DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+			i ? "dst" : "src", config->flip, config->degree,
+			pos->x, pos->y, pos->w, pos->h,
+			sz->hsize, sz->vsize);
+	}
+
+	return -EINVAL;
+}
+
+static int gsc_ippdrv_reset(struct device *dev)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct gsc_scaler *sc = &ctx->sc;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* reset h/w block */
+	ret = gsc_sw_reset(ctx);
+	if (ret < 0) {
+		dev_err(dev, "failed to reset hardware.\n");
+		return ret;
+	}
+
+	/* scaler setting */
+	memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+	sc->range = true;
+
+	return 0;
+}
+
+static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+	struct drm_exynos_ipp_property *property;
+	struct drm_exynos_ipp_config *config;
+	struct drm_exynos_pos	img_pos[EXYNOS_DRM_OPS_MAX];
+	struct drm_exynos_ipp_set_wb set_wb;
+	u32 cfg;
+	int ret, i;
+
+	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+	if (!c_node) {
+		DRM_ERROR("failed to get c_node.\n");
+		return -EINVAL;
+	}
+
+	property = &c_node->property;
+	if (!property) {
+		DRM_ERROR("failed to get property.\n");
+		return -EINVAL;
+	}
+
+	gsc_handle_irq(ctx, true, false, true);
+
+	for_each_ipp_ops(i) {
+		config = &property->config[i];
+		img_pos[i] = config->pos;
+	}
+
+	switch (cmd) {
+	case IPP_CMD_M2M:
+		/* enable one shot */
+		cfg = gsc_read(GSC_ENABLE);
+		cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
+			GSC_ENABLE_CLK_GATE_MODE_MASK);
+		cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
+		gsc_write(cfg, GSC_ENABLE);
+
+		/* src dma memory */
+		cfg = gsc_read(GSC_IN_CON);
+		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+		cfg |= GSC_IN_PATH_MEMORY;
+		gsc_write(cfg, GSC_IN_CON);
+
+		/* dst dma memory */
+		cfg = gsc_read(GSC_OUT_CON);
+		cfg |= GSC_OUT_PATH_MEMORY;
+		gsc_write(cfg, GSC_OUT_CON);
+		break;
+	case IPP_CMD_WB:
+		set_wb.enable = 1;
+		set_wb.refresh = property->refresh_rate;
+		gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+
+		/* src local path */
+		cfg = readl(GSC_IN_CON);
+		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+		cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
+		gsc_write(cfg, GSC_IN_CON);
+
+		/* dst dma memory */
+		cfg = gsc_read(GSC_OUT_CON);
+		cfg |= GSC_OUT_PATH_MEMORY;
+		gsc_write(cfg, GSC_OUT_CON);
+		break;
+	case IPP_CMD_OUTPUT:
+		/* src dma memory */
+		cfg = gsc_read(GSC_IN_CON);
+		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+		cfg |= GSC_IN_PATH_MEMORY;
+		gsc_write(cfg, GSC_IN_CON);
+
+		/* dst local path */
+		cfg = gsc_read(GSC_OUT_CON);
+		cfg |= GSC_OUT_PATH_MEMORY;
+		gsc_write(cfg, GSC_OUT_CON);
+		break;
+	default:
+		ret = -EINVAL;
+		dev_err(dev, "invalid operations.\n");
+		return ret;
+	}
+
+	ret = gsc_set_prescaler(ctx, &ctx->sc,
+		&img_pos[EXYNOS_DRM_OPS_SRC],
+		&img_pos[EXYNOS_DRM_OPS_DST]);
+	if (ret) {
+		dev_err(dev, "failed to set precalser.\n");
+		return ret;
+	}
+
+	gsc_set_scaler(ctx, &ctx->sc);
+
+	cfg = gsc_read(GSC_ENABLE);
+	cfg |= GSC_ENABLE_ON;
+	gsc_write(cfg, GSC_ENABLE);
+
+	return 0;
+}
+
+static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+	switch (cmd) {
+	case IPP_CMD_M2M:
+		/* bypass */
+		break;
+	case IPP_CMD_WB:
+		gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+		break;
+	case IPP_CMD_OUTPUT:
+	default:
+		dev_err(dev, "invalid operations.\n");
+		break;
+	}
+
+	gsc_handle_irq(ctx, false, false, true);
+
+	/* reset sequence */
+	gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK);
+	gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK);
+	gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK);
+
+	cfg = gsc_read(GSC_ENABLE);
+	cfg &= ~GSC_ENABLE_ON;
+	gsc_write(cfg, GSC_ENABLE);
+}
+
+static int __devinit gsc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct gsc_context *ctx;
+	struct resource *res;
+	struct exynos_drm_ippdrv *ippdrv;
+	int ret;
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	/* clock control */
+	ctx->gsc_clk = clk_get(dev, "gscl");
+	if (IS_ERR(ctx->gsc_clk)) {
+		dev_err(dev, "failed to get gsc clock.\n");
+		ret = PTR_ERR(ctx->gsc_clk);
+		goto err_ctx;
+	}
+
+	/* resource memory */
+	ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!ctx->regs_res) {
+		dev_err(dev, "failed to find registers.\n");
+		ret = -ENOENT;
+		goto err_clk;
+	}
+
+	ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
+	if (!ctx->regs) {
+		dev_err(dev, "failed to map registers.\n");
+		ret = -ENXIO;
+		goto err_clk;
+	}
+
+	/* resource irq */
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(dev, "failed to request irq resource.\n");
+		ret = -ENOENT;
+		goto err_get_regs;
+	}
+
+	ctx->irq = res->start;
+	ret = request_threaded_irq(ctx->irq, NULL, gsc_irq_handler,
+		IRQF_ONESHOT, "drm_gsc", ctx);
+	if (ret < 0) {
+		dev_err(dev, "failed to request irq.\n");
+		goto err_get_regs;
+	}
+
+	/* context initialization */
+	ctx->id = pdev->id;
+
+	ippdrv = &ctx->ippdrv;
+	ippdrv->dev = dev;
+	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
+	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
+	ippdrv->check_property = gsc_ippdrv_check_property;
+	ippdrv->reset = gsc_ippdrv_reset;
+	ippdrv->start = gsc_ippdrv_start;
+	ippdrv->stop = gsc_ippdrv_stop;
+	ret = gsc_init_prop_list(ippdrv);
+	if (ret < 0) {
+		dev_err(dev, "failed to init property list.\n");
+		goto err_get_irq;
+	}
+
+	DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+		(int)ippdrv);
+
+	mutex_init(&ctx->lock);
+	platform_set_drvdata(pdev, ctx);
+
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	ret = exynos_drm_ippdrv_register(ippdrv);
+	if (ret < 0) {
+		dev_err(dev, "failed to register drm gsc device.\n");
+		goto err_ippdrv_register;
+	}
+
+	dev_info(&pdev->dev, "drm gsc registered successfully.\n");
+
+	return 0;
+
+err_ippdrv_register:
+	devm_kfree(dev, ippdrv->prop_list);
+	pm_runtime_disable(dev);
+err_get_irq:
+	free_irq(ctx->irq, ctx);
+err_get_regs:
+	devm_iounmap(dev, ctx->regs);
+err_clk:
+	clk_put(ctx->gsc_clk);
+err_ctx:
+	devm_kfree(dev, ctx);
+	return ret;
+}
+
+static int __devexit gsc_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+	devm_kfree(dev, ippdrv->prop_list);
+	exynos_drm_ippdrv_unregister(ippdrv);
+	mutex_destroy(&ctx->lock);
+
+	pm_runtime_set_suspended(dev);
+	pm_runtime_disable(dev);
+
+	free_irq(ctx->irq, ctx);
+	devm_iounmap(dev, ctx->regs);
+
+	clk_put(ctx->gsc_clk);
+
+	devm_kfree(dev, ctx);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gsc_suspend(struct device *dev)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	return gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_resume(struct device *dev)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+	if (!pm_runtime_suspended(dev))
+		return gsc_clk_ctrl(ctx, true);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int gsc_runtime_suspend(struct device *dev)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+	return gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_runtime_resume(struct device *dev)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __FILE__, ctx->id);
+
+	return  gsc_clk_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops gsc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
+	SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
+};
+
+struct platform_driver gsc_driver = {
+	.probe		= gsc_probe,
+	.remove		= __devexit_p(gsc_remove),
+	.driver		= {
+		.name	= "exynos-drm-gsc",
+		.owner	= THIS_MODULE,
+		.pm	= &gsc_pm_ops,
+	},
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
new file mode 100644
index 000000000000..b3c3bc618c0f
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *	Eunchul Kim <chulspro.kim@samsung.com>
+ *	Jinyoung Jeon <jy0.jeon@samsung.com>
+ *	Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_GSC_H_
+#define _EXYNOS_DRM_GSC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ * Mixer output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_GSC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index c3b9e2b45185..55793c46e3c2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -29,6 +29,9 @@
 #define get_ctx_from_subdrv(subdrv)	container_of(subdrv,\
 					struct drm_hdmi_context, subdrv);
 
+/* platform device pointer for common drm hdmi device. */
+static struct platform_device *exynos_drm_hdmi_pdev;
+
 /* Common hdmi subdrv needs to access the hdmi and mixer through context.
 * These should be initialized by the respective drivers */
 static struct exynos_drm_hdmi_context *hdmi_ctx;
@@ -46,6 +49,25 @@ struct drm_hdmi_context {
 	bool	enabled[MIXER_WIN_NR];
 };
 
+int exynos_platform_device_hdmi_register(void)
+{
+	if (exynos_drm_hdmi_pdev)
+		return -EEXIST;
+
+	exynos_drm_hdmi_pdev = platform_device_register_simple(
+			"exynos-drm-hdmi", -1, NULL, 0);
+	if (IS_ERR(exynos_drm_hdmi_pdev))
+		return PTR_ERR(exynos_drm_hdmi_pdev);
+
+	return 0;
+}
+
+void exynos_platform_device_hdmi_unregister(void)
+{
+	if (exynos_drm_hdmi_pdev)
+		platform_device_unregister(exynos_drm_hdmi_pdev);
+}
+
 void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
 {
 	if (ctx)
@@ -157,6 +179,16 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
 		return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
 }
 
+static void drm_hdmi_wait_for_vblank(struct device *subdrv_dev)
+{
+	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (mixer_ops && mixer_ops->wait_for_vblank)
+		mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
+}
+
 static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
 				struct drm_connector *connector,
 				const struct drm_display_mode *mode,
@@ -238,6 +270,7 @@ static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
 	.apply = drm_hdmi_apply,
 	.enable_vblank = drm_hdmi_enable_vblank,
 	.disable_vblank = drm_hdmi_disable_vblank,
+	.wait_for_vblank = drm_hdmi_wait_for_vblank,
 	.mode_fixup = drm_hdmi_mode_fixup,
 	.mode_set = drm_hdmi_mode_set,
 	.get_max_resol = drm_hdmi_get_max_resol,
@@ -291,21 +324,10 @@ static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
 	ctx->enabled[win] = false;
 }
 
-static void drm_mixer_wait_for_vblank(struct device *subdrv_dev)
-{
-	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
-
-	DRM_DEBUG_KMS("%s\n", __FILE__);
-
-	if (mixer_ops && mixer_ops->wait_for_vblank)
-		mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
-}
-
 static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
 	.mode_set = drm_mixer_mode_set,
 	.commit = drm_mixer_commit,
 	.disable = drm_mixer_disable,
-	.wait_for_vblank = drm_mixer_wait_for_vblank,
 };
 
 static struct exynos_drm_manager hdmi_manager = {
@@ -346,9 +368,23 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
 	ctx->hdmi_ctx->drm_dev = drm_dev;
 	ctx->mixer_ctx->drm_dev = drm_dev;
 
+	if (mixer_ops->iommu_on)
+		mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);
+
 	return 0;
 }
 
+static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+	struct drm_hdmi_context *ctx;
+	struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
+
+	ctx = get_ctx_from_subdrv(subdrv);
+
+	if (mixer_ops->iommu_on)
+		mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
+}
+
 static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -368,6 +404,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
 	subdrv->dev = dev;
 	subdrv->manager = &hdmi_manager;
 	subdrv->probe = hdmi_subdrv_probe;
+	subdrv->remove = hdmi_subdrv_remove;
 
 	platform_set_drvdata(pdev, subdrv);
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index 2da5ffd3a059..fcc3093ec8fe 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -62,12 +62,13 @@ struct exynos_hdmi_ops {
 
 struct exynos_mixer_ops {
 	/* manager */
+	int (*iommu_on)(void *ctx, bool enable);
 	int (*enable_vblank)(void *ctx, int pipe);
 	void (*disable_vblank)(void *ctx);
+	void (*wait_for_vblank)(void *ctx);
 	void (*dpms)(void *ctx, int mode);
 
 	/* overlay */
-	void (*wait_for_vblank)(void *ctx);
 	void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
 	void (*win_commit)(void *ctx, int zpos);
 	void (*win_disable)(void *ctx, int zpos);
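
The reordered exynos_mixer_ops now treats wait_for_vblank as a manager
operation and adds an iommu_on hook that the common hdmi sub-driver calls
from its probe/remove paths. A sketch of a mixer-side implementation,
assuming a mixer context that carries its struct device and drm_device
pointers (the context field names are assumptions):

    static int mixer_iommu_on(void *ctx, bool enable)
    {
            struct mixer_context *mctx = ctx;

            if (!is_drm_iommu_supported(mctx->drm_dev))
                    return 0;

            if (enable)
                    return drm_iommu_attach_device(mctx->drm_dev, mctx->dev);

            drm_iommu_detach_device(mctx->drm_dev, mctx->dev);
            return 0;
    }
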
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
new file mode 100644
index 000000000000..2482b7f96341
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -0,0 +1,150 @@
+/* exynos_drm_iommu.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drmP.h>
+#include <drm/exynos_drm.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+
+#include <asm/dma-iommu.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * drm_create_iommu_mapping - create a mapping structure
+ *
+ * @drm_dev: DRM device
+ */
+int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+	struct dma_iommu_mapping *mapping = NULL;
+	struct exynos_drm_private *priv = drm_dev->dev_private;
+	struct device *dev = drm_dev->dev;
+
+	if (!priv->da_start)
+		priv->da_start = EXYNOS_DEV_ADDR_START;
+	if (!priv->da_space_size)
+		priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
+	if (!priv->da_space_order)
+		priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
+
+	mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
+						priv->da_space_size,
+						priv->da_space_order);
+	if (IS_ERR(mapping))
+		return PTR_ERR(mapping);
+
+	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+					GFP_KERNEL);
+	if (!dev->dma_parms) {
+		arm_iommu_release_mapping(mapping);
+		return -ENOMEM;
+	}
+	dma_set_max_seg_size(dev, 0xffffffffu);
+	dev->archdata.mapping = mapping;
+
+	return 0;
+}
+
+/*
+ * drm_release_iommu_mapping - release iommu mapping structure
+ *
+ * @drm_dev: DRM device
+ *
+ * If mapping->kref reaches 0 then everything related to the iommu
+ * mapping is released.
+ */
+void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+	struct device *dev = drm_dev->dev;
+
+	arm_iommu_release_mapping(dev->archdata.mapping);
+}
+
+/*
+ * drm_iommu_attach_device- attach device to iommu mapping
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be attached
+ *
+ * This function should be called by sub drivers to attach themselves to the
+ * iommu mapping.
+ */
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+				struct device *subdrv_dev)
+{
+	struct device *dev = drm_dev->dev;
+	int ret;
+
+	if (!dev->archdata.mapping) {
+		DRM_ERROR("iommu_mapping is null.\n");
+		return -EFAULT;
+	}
+
+	subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
+					sizeof(*subdrv_dev->dma_parms),
+					GFP_KERNEL);
+	if (!subdrv_dev->dma_parms)
+		return -ENOMEM;
+	dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
+
+	ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
+	if (ret < 0) {
+		DRM_DEBUG_KMS("failed iommu attach.\n");
+		return ret;
+	}
+
+	/*
+	 * Set dma_ops to drm_device just one time.
+	 *
+	 * The dma mapping api needs a device object to allocate physical
+	 * memory and map it through the iommu table.
+	 * Once the iommu attach has succeeded, the sub driver has the
+	 * dma_ops for the iommu, and all sub drivers share the same dma_ops.
+	 */
+	if (!dev->archdata.dma_ops)
+		dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
+
+	return 0;
+}
+
+/*
+ * drm_iommu_detach_device - detach device address space mapping from device
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be detached
+ *
+ * This function should be called by sub drivers to detach themselves from the
+ * iommu mapping.
+ */
+void drm_iommu_detach_device(struct drm_device *drm_dev,
+				struct device *subdrv_dev)
+{
+	struct device *dev = drm_dev->dev;
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+	if (!mapping || !mapping->domain)
+		return;
+
+	iommu_detach_device(mapping->domain, subdrv_dev);
+	drm_release_iommu_mapping(drm_dev);
+}
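
The intended call flow for these helpers: the core creates the common
mapping once at load time, then each sub driver attaches itself in its
probe callback and detaches in its remove callback, exactly as the
hdmi_subdrv_probe()/hdmi_subdrv_remove() changes above do via iommu_on.
A minimal sub-driver-side sketch (the function name is hypothetical):

    static int example_subdrv_probe(struct drm_device *drm_dev,
                                    struct device *dev)
    {
            /* no-op when the core never created a mapping */
            if (!is_drm_iommu_supported(drm_dev))
                    return 0;

            return drm_iommu_attach_device(drm_dev, dev);
    }
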
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
new file mode 100644
index 000000000000..18a0ca190b98
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -0,0 +1,85 @@
+/* exynos_drm_iommu.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_IOMMU_H_
+#define _EXYNOS_DRM_IOMMU_H_
+
+#define EXYNOS_DEV_ADDR_START	0x20000000
+#define EXYNOS_DEV_ADDR_SIZE	0x40000000
+#define EXYNOS_DEV_ADDR_ORDER	0x4
+
+#ifdef CONFIG_DRM_EXYNOS_IOMMU
+
+int drm_create_iommu_mapping(struct drm_device *drm_dev);
+
+void drm_release_iommu_mapping(struct drm_device *drm_dev);
+
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+				struct device *subdrv_dev);
+
+void drm_iommu_detach_device(struct drm_device *dev_dev,
+				struct device *subdrv_dev);
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+	struct device *dev = drm_dev->dev;
+
+	return dev->archdata.mapping ? true : false;
+#else
+	return false;
+#endif
+}
+
+#else
+
+struct dma_iommu_mapping;
+static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+	return 0;
+}
+
+static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+}
+
+static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
+						struct device *subdrv_dev)
+{
+	return 0;
+}
+
+static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
+						struct device *subdrv_dev)
+{
+}
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+	return false;
+}
+
+#endif
+#endif
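
One intended consumer of is_drm_iommu_supported() is the buffer
allocation path: with an iommu attached the device no longer needs
physically contiguous pages. An illustrative fragment, assuming the
EXYNOS_BO_CONTIG/EXYNOS_BO_NONCONTIG gem flags from exynos_drm.h:

    unsigned int flags = EXYNOS_BO_CONTIG;

    if (is_drm_iommu_supported(drm_dev))
            flags = EXYNOS_BO_NONCONTIG;
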
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
new file mode 100644
index 000000000000..49eebe948ed2
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -0,0 +1,2060 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *	Eunchul Kim <chulspro.kim@samsung.com>
+ *	Jinyoung Jeon <jy0.jeon@samsung.com>
+ *	Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * IPP stands for Image Post Processing and supports
+ * image scaler/rotator and input/output DMA operations
+ * using FIMC, GSC, Rotator and so on.
+ * IPP is an integrated device driver for h/w blocks that share
+ * these attributes.
+ */
+
+/*
+ * TODO
+ * 1. expand command control id.
+ * 2. integrate property and config.
+ * 3. remove the send_event id check routine.
+ * 4. compare send_event id if needed.
+ * 5. free subdrv_remove notifier callback list if needed.
+ * 6. need to check subdrv_open about multi-open.
+ * 7. need to implement power and sysmmu control in power_on.
+ */
+
+#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
+#define ipp_is_m2m_cmd(c)	(c == IPP_CMD_M2M)
+
+/*
+ * A structure of event.
+ *
+ * @base: base of event.
+ * @event: ipp event.
+ */
+struct drm_exynos_ipp_send_event {
+	struct drm_pending_event	base;
+	struct drm_exynos_ipp_event	event;
+};
+
+/*
+ * A structure of memory node.
+ *
+ * @list: list head to memory queue information.
+ * @ops_id: id of operations.
+ * @prop_id: id of property.
+ * @buf_id: id of buffer.
+ * @buf_info: gem objects and dma address, size.
+ * @filp: a pointer to drm_file.
+ */
+struct drm_exynos_ipp_mem_node {
+	struct list_head	list;
+	enum drm_exynos_ops_id	ops_id;
+	u32	prop_id;
+	u32	buf_id;
+	struct drm_exynos_ipp_buf_info	buf_info;
+	struct drm_file		*filp;
+};
+
+/*
+ * A structure of ipp context.
+ *
+ * @subdrv: sub driver structure used to register with the exynos drm core.
+ * @ipp_lock: lock for synchronization of access to ipp_idr.
+ * @prop_lock: lock for synchronization of access to prop_idr.
+ * @ipp_idr: ipp driver idr.
+ * @prop_idr: property idr.
+ * @event_workq: event work queue.
+ * @cmd_workq: command work queue.
+ */
+struct ipp_context {
+	struct exynos_drm_subdrv	subdrv;
+	struct mutex	ipp_lock;
+	struct mutex	prop_lock;
+	struct idr	ipp_idr;
+	struct idr	prop_idr;
+	struct workqueue_struct	*event_workq;
+	struct workqueue_struct	*cmd_workq;
+};
+
+static LIST_HEAD(exynos_drm_ippdrv_list);
+static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
+static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
+
+int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!ippdrv)
+		return -EINVAL;
+
+	mutex_lock(&exynos_drm_ippdrv_lock);
+	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
+	mutex_unlock(&exynos_drm_ippdrv_lock);
+
+	return 0;
+}
+
+int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!ippdrv)
+		return -EINVAL;
+
+	mutex_lock(&exynos_drm_ippdrv_lock);
+	list_del(&ippdrv->drv_list);
+	mutex_unlock(&exynos_drm_ippdrv_lock);
+
+	return 0;
+}
+
+static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
+		u32 *idp)
+{
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+again:
+	/* ensure there is space available to allocate a handle */
+	if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
+		DRM_ERROR("failed to get idr.\n");
+		return -ENOMEM;
+	}
+
+	/* do the allocation under our mutexlock */
+	mutex_lock(lock);
+	ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
+	mutex_unlock(lock);
+	if (ret == -EAGAIN)
+		goto again;
+
+	return ret;
+}
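
The pre-get/retry loop above is the classic idr allocation idiom of this
era: idr_pre_get() preloads memory outside the lock, and
idr_get_new_above() can still return -EAGAIN when a racing allocation
consumed the preloaded node, hence the retry. For comparison only, later
kernels collapse the same logic into a single idr_alloc() call (not
available to this series):

    mutex_lock(lock);
    ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL); /* ids start at 1 */
    mutex_unlock(lock);
    if (ret < 0)
            return ret;
    *idp = ret;
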
+
+static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
+{
+	void *obj;
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);
+
+	mutex_lock(lock);
+
+	/* find object using handle */
+	obj = idr_find(id_idr, id);
+	if (!obj) {
+		DRM_ERROR("failed to find object.\n");
+		mutex_unlock(lock);
+		return ERR_PTR(-ENODEV);
+	}
+
+	mutex_unlock(lock);
+
+	return obj;
+}
+
+static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
+		enum drm_exynos_ipp_cmd	cmd)
+{
+	/*
+	 * Check the dedicated flag, and for WB and OUTPUT operations
+	 * whether the device is still powered on.
+	 */
+	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
+	    !pm_runtime_suspended(ippdrv->dev)))
+		return true;
+
+	return false;
+}
+
+static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
+		struct drm_exynos_ipp_property *property)
+{
+	struct exynos_drm_ippdrv *ippdrv;
+	u32 ipp_id = property->ipp_id;
+
+	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);
+
+	if (ipp_id) {
+		/* find ipp driver using idr */
+		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+			ipp_id);
+		if (IS_ERR_OR_NULL(ippdrv)) {
+			DRM_ERROR("not found ipp%d driver.\n", ipp_id);
+			return ippdrv;
+		}
+
+		/*
+		 * WB and OUTPUT operations do not support multi-operation,
+		 * so the device is made dedicated in the set property ioctl;
+		 * the dedicated flag is cleared once the ipp driver has
+		 * finished its operations.
+		 */
+		if (ipp_check_dedicated(ippdrv, property->cmd)) {
+			DRM_ERROR("chosen device is already in use.\n");
+			return ERR_PTR(-EBUSY);
+		}
+
+		/*
+		 * This is necessary to find the correct device among the
+		 * ipp drivers: they have different abilities, so the
+		 * requested property has to be checked.
+		 */
+		if (ippdrv->check_property &&
+		    ippdrv->check_property(ippdrv->dev, property)) {
+			DRM_ERROR("not support property.\n");
+			return ERR_PTR(-EINVAL);
+		}
+
+		return ippdrv;
+	} else {
+		/*
+		 * This case searches all ipp drivers for a match.
+		 * The user application did not set an ipp_id here, so the
+		 * ipp subsystem looks for a suitable driver in the list.
+		 */
+		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+			if (ipp_check_dedicated(ippdrv, property->cmd)) {
+				DRM_DEBUG_KMS("%s:used device.\n", __func__);
+				continue;
+			}
+
+			if (ippdrv->check_property &&
+			    ippdrv->check_property(ippdrv->dev, property)) {
+				DRM_DEBUG_KMS("%s:not support property.\n",
+					__func__);
+				continue;
+			}
+
+			return ippdrv;
+		}
+
+		DRM_ERROR("not support ipp driver operations.\n");
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+
+static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
+{
+	struct exynos_drm_ippdrv *ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	int count = 0;
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+	if (list_empty(&exynos_drm_ippdrv_list)) {
+		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+		return ERR_PTR(-ENODEV);
+	}
+
+	/*
+	 * This case searches for the ipp driver by prop_id handle;
+	 * the ipp subsystem sometimes finds the driver this way,
+	 * e.g. in the PAUSE state, queue buf and command control paths.
+	 */
+	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+		DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
+			count++, (int)ippdrv);
+
+		if (!list_empty(&ippdrv->cmd_list)) {
+			list_for_each_entry(c_node, &ippdrv->cmd_list, list)
+				if (c_node->property.prop_id == prop_id)
+					return ippdrv;
+		}
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+
+int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+	struct device *dev = priv->dev;
+	struct ipp_context *ctx = get_ipp_context(dev);
+	struct drm_exynos_ipp_prop_list *prop_list = data;
+	struct exynos_drm_ippdrv *ippdrv;
+	int count = 0;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!ctx) {
+		DRM_ERROR("invalid context.\n");
+		return -EINVAL;
+	}
+
+	if (!prop_list) {
+		DRM_ERROR("invalid property parameter.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);
+
+	if (!prop_list->ipp_id) {
+		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+			count++;
+		/*
+		 * Supports the ippdrv list count for the user application:
+		 * in the first step the application gets the ippdrv count,
+		 * and in the second step it queries each ippdrv capability
+		 * using an ipp_id.
+		 */
+		prop_list->count = count;
+	} else {
+		/*
+		 * Getting ippdrv capability by ipp_id.
+		 * Some devices do not support the wb or output interfaces,
+		 * so the user application detects the correct ipp driver
+		 * using this ioctl.
+		 */
+		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+						prop_list->ipp_id);
+		if (IS_ERR(ippdrv)) {
+			DRM_ERROR("not found ipp%d driver.\n",
+					prop_list->ipp_id);
+			return -EINVAL;
+		}
+
+		*prop_list = *ippdrv->prop_list;
+	}
+
+	return 0;
+}
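
From user space the ioctl is therefore used in two steps. A sketch,
assuming the DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY wrapper that accompanies
this series and that driver ids are allocated densely from 1:

    struct drm_exynos_ipp_prop_list prop_list = { .ipp_id = 0 };

    /* step 1: ipp_id == 0 returns only the driver count */
    ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &prop_list);

    /* step 2: query each driver's capability by its ipp_id */
    for (i = 1; i <= prop_list.count; i++) {
            struct drm_exynos_ipp_prop_list cap = { .ipp_id = i };

            ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &cap);
    }
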
+
+static void ipp_print_property(struct drm_exynos_ipp_property *property,
+		int idx)
+{
+	struct drm_exynos_ipp_config *config = &property->config[idx];
+	struct drm_exynos_pos *pos = &config->pos;
+	struct drm_exynos_sz *sz = &config->sz;
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
+		__func__, property->prop_id, idx ? "dst" : "src", config->fmt);
+
+	DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
+		__func__, pos->x, pos->y, pos->w, pos->h,
+		sz->hsize, sz->vsize, config->flip, config->degree);
+}
+
+static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
+{
+	struct exynos_drm_ippdrv *ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	u32 prop_id = property->prop_id;
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+	ippdrv = ipp_find_drv_by_handle(prop_id);
+	if (IS_ERR_OR_NULL(ippdrv)) {
+		DRM_ERROR("failed to get ipp driver.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Find the command node in the ippdrv command list.
+	 * When the command node matching prop_id is found,
+	 * store the new property information in it.
+	 */
+	list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
+		if ((c_node->property.prop_id == prop_id) &&
+		    (c_node->state == IPP_STATE_STOP)) {
+			DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
+				__func__, property->cmd, (int)ippdrv);
+
+			c_node->property = *property;
+			return 0;
+		}
+	}
+
+	DRM_ERROR("failed to search property.\n");
+
+	return -EINVAL;
+}
+
+static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
+{
+	struct drm_exynos_ipp_cmd_work *cmd_work;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
+	if (!cmd_work) {
+		DRM_ERROR("failed to alloc cmd_work.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
+
+	return cmd_work;
+}
+
+static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
+{
+	struct drm_exynos_ipp_event_work *event_work;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
+	if (!event_work) {
+		DRM_ERROR("failed to alloc event_work.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
+
+	return event_work;
+}
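
Both helpers cast their work structure straight to struct work_struct,
which is only valid while the work member stays first. A sketch of the
layout this relies on, along the lines the series' exynos_drm_ipp.h is
expected to define:

    struct drm_exynos_ipp_cmd_work {
            struct work_struct              work;   /* must stay first */
            struct exynos_drm_ippdrv        *ippdrv;
            struct drm_exynos_ipp_cmd_node  *c_node;
            enum drm_exynos_ipp_ctrl        ctrl;
    };
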
+
+int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+	struct device *dev = priv->dev;
+	struct ipp_context *ctx = get_ipp_context(dev);
+	struct drm_exynos_ipp_property *property = data;
+	struct exynos_drm_ippdrv *ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	int ret, i;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!ctx) {
+		DRM_ERROR("invalid context.\n");
+		return -EINVAL;
+	}
+
+	if (!property) {
+		DRM_ERROR("invalid property parameter.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Log the property handed in by the user application;
+	 * applications set various properties.
+	 */
+	for_each_ipp_ops(i)
+		ipp_print_property(property, i);
+
+	/*
+	 * The set property ioctl normally generates a new prop_id, but in
+	 * this case a prop_id was already assigned by an earlier set
+	 * property call (e.g. the PAUSE state), so find the current
+	 * prop_id and reuse it instead of allocating a new one.
+	 */
+	if (property->prop_id) {
+		DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+		return ipp_find_and_set_property(property);
+	}
+
+	/* find ipp driver using ipp id */
+	ippdrv = ipp_find_driver(ctx, property);
+	if (IS_ERR_OR_NULL(ippdrv)) {
+		DRM_ERROR("failed to get ipp driver.\n");
+		return -EINVAL;
+	}
+
+	/* allocate command node */
+	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
+	if (!c_node) {
+		DRM_ERROR("failed to allocate command node.\n");
+		return -ENOMEM;
+	}
+
+	/* create property id */
+	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
+		&property->prop_id);
+	if (ret) {
+		DRM_ERROR("failed to create id.\n");
+		goto err_clear;
+	}
+
+	DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
+		__func__, property->prop_id, property->cmd, (int)ippdrv);
+
+	/* stored property information and ippdrv in private data */
+	c_node->priv = priv;
+	c_node->property = *property;
+	c_node->state = IPP_STATE_IDLE;
+
+	c_node->start_work = ipp_create_cmd_work();
+	if (IS_ERR_OR_NULL(c_node->start_work)) {
+		DRM_ERROR("failed to create start work.\n");
+		goto err_clear;
+	}
+
+	c_node->stop_work = ipp_create_cmd_work();
+	if (IS_ERR_OR_NULL(c_node->stop_work)) {
+		DRM_ERROR("failed to create stop work.\n");
+		goto err_free_start;
+	}
+
+	c_node->event_work = ipp_create_event_work();
+	if (IS_ERR_OR_NULL(c_node->event_work)) {
+		DRM_ERROR("failed to create event work.\n");
+		goto err_free_stop;
+	}
+
+	mutex_init(&c_node->cmd_lock);
+	mutex_init(&c_node->mem_lock);
+	mutex_init(&c_node->event_lock);
+
+	init_completion(&c_node->start_complete);
+	init_completion(&c_node->stop_complete);
+
+	for_each_ipp_ops(i)
+		INIT_LIST_HEAD(&c_node->mem_list[i]);
+
+	INIT_LIST_HEAD(&c_node->event_list);
+	list_splice_init(&priv->event_list, &c_node->event_list);
+	list_add_tail(&c_node->list, &ippdrv->cmd_list);
+
+	/* make dedicated state without m2m */
+	if (!ipp_is_m2m_cmd(property->cmd))
+		ippdrv->dedicated = true;
+
+	return 0;
+
+err_free_stop:
+	kfree(c_node->stop_work);
+err_free_start:
+	kfree(c_node->start_work);
+err_clear:
+	kfree(c_node);
+	return ret;
+}
+
+static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
+{
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* delete list */
+	list_del(&c_node->list);
+
+	/* destroy mutex */
+	mutex_destroy(&c_node->cmd_lock);
+	mutex_destroy(&c_node->mem_lock);
+	mutex_destroy(&c_node->event_lock);
+
+	/* free command node */
+	kfree(c_node->start_work);
+	kfree(c_node->stop_work);
+	kfree(c_node->event_work);
+	kfree(c_node);
+}
+
+static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
+{
+	struct drm_exynos_ipp_property *property = &c_node->property;
+	struct drm_exynos_ipp_mem_node *m_node;
+	struct list_head *head;
+	int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	mutex_lock(&c_node->mem_lock);
+
+	for_each_ipp_ops(i) {
+		/* source/destination memory list */
+		head = &c_node->mem_list[i];
+
+		if (list_empty(head)) {
+			DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
+				i ? "dst" : "src");
+			continue;
+		}
+
+		/* find memory node entry */
+		list_for_each_entry(m_node, head, list) {
+			DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
+				i ? "dst" : "src", count[i], (int)m_node);
+			count[i]++;
+		}
+	}
+
+	DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
+		min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
+		max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
+
+	/*
+	 * M2M operations need paired src/dst memory addresses, so check
+	 * the minimum of the src and dst counts; e.g. with three src and
+	 * two dst buffers queued, only two M2M runs are possible.
+	 * The other cases do not use paired memory, so the maximum
+	 * count is used.
+	 */
+	if (ipp_is_m2m_cmd(property->cmd))
+		ret = min(count[EXYNOS_DRM_OPS_SRC],
+			count[EXYNOS_DRM_OPS_DST]);
+	else
+		ret = max(count[EXYNOS_DRM_OPS_SRC],
+			count[EXYNOS_DRM_OPS_DST]);
+
+	mutex_unlock(&c_node->mem_lock);
+
+	return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+		*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_queue_buf *qbuf)
+{
+	struct drm_exynos_ipp_mem_node *m_node;
+	struct list_head *head;
+	int count = 0;
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);
+
+	/* source/destination memory list */
+	head = &c_node->mem_list[qbuf->ops_id];
+
+	/* find memory node from memory list */
+	list_for_each_entry(m_node, head, list) {
+		DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
+			__func__, count++, (int)m_node);
+
+		/* compare buffer id */
+		if (m_node->buf_id == qbuf->buf_id)
+			return m_node;
+	}
+
+	return NULL;
+}
+
+static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_mem_node *m_node)
+{
+	struct exynos_drm_ipp_ops *ops = NULL;
+	int ret = 0;
+
+	DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+	if (!m_node) {
+		DRM_ERROR("invalid queue node.\n");
+		return -EFAULT;
+	}
+
+	mutex_lock(&c_node->mem_lock);
+
+	DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+	/* get operations callback */
+	ops = ippdrv->ops[m_node->ops_id];
+	if (!ops) {
+		DRM_ERROR("not support ops.\n");
+		ret = -EFAULT;
+		goto err_unlock;
+	}
+
+	/* set address and enable irq */
+	if (ops->set_addr) {
+		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
+			m_node->buf_id, IPP_BUF_ENQUEUE);
+		if (ret) {
+			DRM_ERROR("failed to set addr.\n");
+			goto err_unlock;
+		}
+	}
+
+err_unlock:
+	mutex_unlock(&c_node->mem_lock);
+	return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+		*ipp_get_mem_node(struct drm_device *drm_dev,
+		struct drm_file *file,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_queue_buf *qbuf)
+{
+	struct drm_exynos_ipp_mem_node *m_node;
+	struct drm_exynos_ipp_buf_info buf_info;
+	void *addr;
+	int i;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	mutex_lock(&c_node->mem_lock);
+
+	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
+	if (!m_node) {
+		DRM_ERROR("failed to allocate queue node.\n");
+		goto err_unlock;
+	}
+
+	/* clear base address for error handling */
+	memset(&buf_info, 0x0, sizeof(buf_info));
+
+	/* operations, buffer id */
+	m_node->ops_id = qbuf->ops_id;
+	m_node->prop_id = qbuf->prop_id;
+	m_node->buf_id = qbuf->buf_id;
+
+	DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
+		(int)m_node, qbuf->ops_id);
+	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
+		qbuf->prop_id, m_node->buf_id);
+
+	for_each_ipp_planar(i) {
+		DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
+			i, qbuf->handle[i]);
+
+		/* get dma address by handle */
+		if (qbuf->handle[i]) {
+			addr = exynos_drm_gem_get_dma_addr(drm_dev,
+					qbuf->handle[i], file);
+			if (IS_ERR(addr)) {
+				DRM_ERROR("failed to get addr.\n");
+				goto err_clear;
+			}
+
+			buf_info.handles[i] = qbuf->handle[i];
+			buf_info.base[i] = *(dma_addr_t *) addr;
+			DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
+				__func__, i, buf_info.base[i],
+				(int)buf_info.handles[i]);
+		}
+	}
+
+	m_node->filp = file;
+	m_node->buf_info = buf_info;
+	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
+
+	mutex_unlock(&c_node->mem_lock);
+	return m_node;
+
+err_clear:
+	kfree(m_node);
+err_unlock:
+	mutex_unlock(&c_node->mem_lock);
+	return ERR_PTR(-EFAULT);
+}
+
+static int ipp_put_mem_node(struct drm_device *drm_dev,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_mem_node *m_node)
+{
+	int i;
+
+	DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+	if (!m_node) {
+		DRM_ERROR("invalid dequeue node.\n");
+		return -EFAULT;
+	}
+
+	if (list_empty(&m_node->list)) {
+		DRM_ERROR("empty memory node.\n");
+		return -ENOMEM;
+	}
+
+	mutex_lock(&c_node->mem_lock);
+
+	DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+	/* put gem buffer */
+	for_each_ipp_planar(i) {
+		unsigned long handle = m_node->buf_info.handles[i];
+		if (handle)
+			exynos_drm_gem_put_dma_addr(drm_dev, handle,
+							m_node->filp);
+	}
+
+	/* delete list in queue */
+	list_del(&m_node->list);
+	kfree(m_node);
+
+	mutex_unlock(&c_node->mem_lock);
+
+	return 0;
+}
+
+static void ipp_free_event(struct drm_pending_event *event)
+{
+	kfree(event);
+}
+
+static int ipp_get_event(struct drm_device *drm_dev,
+		struct drm_file *file,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_queue_buf *qbuf)
+{
+	struct drm_exynos_ipp_send_event *e;
+	unsigned long flags;
+
+	DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
+		qbuf->ops_id, qbuf->buf_id);
+
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+
+	if (!e) {
+		DRM_ERROR("failed to allocate event.\n");
+		spin_lock_irqsave(&drm_dev->event_lock, flags);
+		file->event_space += sizeof(e->event);
+		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+		return -ENOMEM;
+	}
+
+	/* make event */
+	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
+	e->event.base.length = sizeof(e->event);
+	e->event.user_data = qbuf->user_data;
+	e->event.prop_id = qbuf->prop_id;
+	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
+	e->base.event = &e->event.base;
+	e->base.file_priv = file;
+	e->base.destroy = ipp_free_event;
+	list_add_tail(&e->base.link, &c_node->event_list);
+
+	return 0;
+}
+
+static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_queue_buf *qbuf)
+{
+	struct drm_exynos_ipp_send_event *e, *te;
+	int count = 0;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (list_empty(&c_node->event_list)) {
+		DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
+		return;
+	}
+
+	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
+		DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
+			__func__, count++, (int)e);
+
+		/*
+		 * qbuf == NULL means delete all events:
+		 * stop operations want to clear the whole event list.
+		 * Otherwise delete only the event with the matching buf id.
+		 */
+		if (!qbuf) {
+			/* delete list */
+			list_del(&e->base.link);
+			kfree(e);
+		}
+
+		/* compare buffer id */
+		if (qbuf && (qbuf->buf_id ==
+		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
+			/* delete list */
+			list_del(&e->base.link);
+			kfree(e);
+			return;
+		}
+	}
+}
+
+void ipp_handle_cmd_work(struct device *dev,
+		struct exynos_drm_ippdrv *ippdrv,
+		struct drm_exynos_ipp_cmd_work *cmd_work,
+		struct drm_exynos_ipp_cmd_node *c_node)
+{
+	struct ipp_context *ctx = get_ipp_context(dev);
+
+	cmd_work->ippdrv = ippdrv;
+	cmd_work->c_node = c_node;
+	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
+}
+
+static int ipp_queue_buf_with_run(struct device *dev,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_mem_node *m_node,
+		struct drm_exynos_ipp_queue_buf *qbuf)
+{
+	struct exynos_drm_ippdrv *ippdrv;
+	struct drm_exynos_ipp_property *property;
+	struct exynos_drm_ipp_ops *ops;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
+	if (IS_ERR_OR_NULL(ippdrv)) {
+		DRM_ERROR("failed to get ipp driver.\n");
+		return -EFAULT;
+	}
+
+	ops = ippdrv->ops[qbuf->ops_id];
+	if (!ops) {
+		DRM_ERROR("failed to get ops.\n");
+		return -EFAULT;
+	}
+
+	property = &c_node->property;
+
+	if (c_node->state != IPP_STATE_START) {
+		DRM_DEBUG_KMS("%s:bypass for invalid state.\n", __func__);
+		return 0;
+	}
+
+	if (!ipp_check_mem_list(c_node)) {
+		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+		return 0;
+	}
+
+	/*
+	 * If the destination buffer is set and the clock is enabled,
+	 * then m2m operations need to be started from queue_buf.
+	 */
+	if (ipp_is_m2m_cmd(property->cmd)) {
+		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
+
+		cmd_work->ctrl = IPP_CTRL_PLAY;
+		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+	} else {
+		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+		if (ret) {
+			DRM_ERROR("failed to set m node.\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void ipp_clean_queue_buf(struct drm_device *drm_dev,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_queue_buf *qbuf)
+{
+	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
+		/* delete list */
+		list_for_each_entry_safe(m_node, tm_node,
+			&c_node->mem_list[qbuf->ops_id], list) {
+			if (m_node->buf_id == qbuf->buf_id &&
+			    m_node->ops_id == qbuf->ops_id)
+				ipp_put_mem_node(drm_dev, c_node, m_node);
+		}
+	}
+}
+
+int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+	struct device *dev = priv->dev;
+	struct ipp_context *ctx = get_ipp_context(dev);
+	struct drm_exynos_ipp_queue_buf *qbuf = data;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	struct drm_exynos_ipp_mem_node *m_node;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!qbuf) {
+		DRM_ERROR("invalid buf parameter.\n");
+		return -EINVAL;
+	}
+
+	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
+		DRM_ERROR("invalid ops parameter.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
+		__func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
+		qbuf->buf_id, qbuf->buf_type);
+
+	/* find command node */
+	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+		qbuf->prop_id);
+	if (IS_ERR(c_node)) {
+		DRM_ERROR("failed to get command node.\n");
+		return PTR_ERR(c_node);
+	}
+
+	/* buffer control */
+	switch (qbuf->buf_type) {
+	case IPP_BUF_ENQUEUE:
+		/* get memory node */
+		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
+		if (IS_ERR(m_node)) {
+			DRM_ERROR("failed to get m_node.\n");
+			return PTR_ERR(m_node);
+		}
+
+		/*
+		 * The first step gets an event for the destination buffer,
+		 * and the second step, in the M2M case, runs with the
+		 * destination buffer if needed.
+		 */
+		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
+			/* get event for destination buffer */
+			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
+			if (ret) {
+				DRM_ERROR("failed to get event.\n");
+				goto err_clean_node;
+			}
+
+			/*
+			 * The M2M case runs the play control for the
+			 * streaming feature; other cases set the address
+			 * and wait.
+			 */
+			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
+			if (ret) {
+				DRM_ERROR("failed to run command.\n");
+				goto err_clean_node;
+			}
+		}
+		break;
+	case IPP_BUF_DEQUEUE:
+		mutex_lock(&c_node->cmd_lock);
+
+		/* put event for destination buffer */
+		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
+			ipp_put_event(c_node, qbuf);
+
+		ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+
+		mutex_unlock(&c_node->cmd_lock);
+		break;
+	default:
+		DRM_ERROR("invalid buffer control.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+
+err_clean_node:
+	DRM_ERROR("clean memory nodes.\n");
+
+	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+	return ret;
+}
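
From user space, enqueueing a destination buffer then looks like the
following sketch (the ioctl wrapper name is assumed from this series;
gem_handle is a previously created gem handle):

    struct drm_exynos_ipp_queue_buf qbuf = {
            .prop_id        = prop_id,
            .ops_id         = EXYNOS_DRM_OPS_DST,
            .buf_type       = IPP_BUF_ENQUEUE,
            .buf_id         = 0,
            .handle[0]      = gem_handle,
    };

    ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
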
+
+static bool exynos_drm_ipp_check_valid(struct device *dev,
+		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
+{
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (ctrl != IPP_CTRL_PLAY) {
+		if (pm_runtime_suspended(dev)) {
+			DRM_ERROR("pm:runtime_suspended.\n");
+			goto err_status;
+		}
+	}
+
+	switch (ctrl) {
+	case IPP_CTRL_PLAY:
+		if (state != IPP_STATE_IDLE)
+			goto err_status;
+		break;
+	case IPP_CTRL_STOP:
+		if (state == IPP_STATE_STOP)
+			goto err_status;
+		break;
+	case IPP_CTRL_PAUSE:
+		if (state != IPP_STATE_START)
+			goto err_status;
+		break;
+	case IPP_CTRL_RESUME:
+		if (state != IPP_STATE_STOP)
+			goto err_status;
+		break;
+	default:
+		DRM_ERROR("invalid state.\n");
+		goto err_status;
+		break;
+	}
+
+	return true;
+
+err_status:
+	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
+	return false;
+}
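
The checks above encode a small state machine; summarized:

    ctrl              required current state
    IPP_CTRL_PLAY     IPP_STATE_IDLE
    IPP_CTRL_STOP     anything except IPP_STATE_STOP
    IPP_CTRL_PAUSE    IPP_STATE_START
    IPP_CTRL_RESUME   IPP_STATE_STOP

Every control except PLAY additionally requires that the device is not
runtime suspended.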
+
+int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+	struct exynos_drm_ippdrv *ippdrv = NULL;
+	struct device *dev = priv->dev;
+	struct ipp_context *ctx = get_ipp_context(dev);
+	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
+	struct drm_exynos_ipp_cmd_work *cmd_work;
+	struct drm_exynos_ipp_cmd_node *c_node;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!ctx) {
+		DRM_ERROR("invalid context.\n");
+		return -EINVAL;
+	}
+
+	if (!cmd_ctrl) {
+		DRM_ERROR("invalid control parameter.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
+		cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
+	if (IS_ERR(ippdrv)) {
+		DRM_ERROR("failed to get ipp driver.\n");
+		return PTR_ERR(ippdrv);
+	}
+
+	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+		cmd_ctrl->prop_id);
+	if (IS_ERR(c_node)) {
+		DRM_ERROR("invalid command node list.\n");
+		return PTR_ERR(c_node);
+	}
+
+	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
+	    c_node->state)) {
+		DRM_ERROR("invalid state.\n");
+		return -EINVAL;
+	}
+
+	switch (cmd_ctrl->ctrl) {
+	case IPP_CTRL_PLAY:
+		if (pm_runtime_suspended(ippdrv->dev))
+			pm_runtime_get_sync(ippdrv->dev);
+		c_node->state = IPP_STATE_START;
+
+		cmd_work = c_node->start_work;
+		cmd_work->ctrl = cmd_ctrl->ctrl;
+		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+		break;
+	case IPP_CTRL_STOP:
+		cmd_work = c_node->stop_work;
+		cmd_work->ctrl = cmd_ctrl->ctrl;
+		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+		if (!wait_for_completion_timeout(&c_node->stop_complete,
+		    msecs_to_jiffies(300))) {
+			DRM_ERROR("timeout stop:prop_id[%d]\n",
+				c_node->property.prop_id);
+		}
+
+		c_node->state = IPP_STATE_STOP;
+		ippdrv->dedicated = false;
+		ipp_clean_cmd_node(c_node);
+
+		if (list_empty(&ippdrv->cmd_list))
+			pm_runtime_put_sync(ippdrv->dev);
+		break;
+	case IPP_CTRL_PAUSE:
+		cmd_work = c_node->stop_work;
+		cmd_work->ctrl = cmd_ctrl->ctrl;
+		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+		if (!wait_for_completion_timeout(&c_node->stop_complete,
+		    msecs_to_jiffies(200))) {
+			DRM_ERROR("timeout stop:prop_id[%d]\n",
+				c_node->property.prop_id);
+		}
+
+		c_node->state = IPP_STATE_STOP;
+		break;
+	case IPP_CTRL_RESUME:
+		c_node->state = IPP_STATE_START;
+		cmd_work = c_node->start_work;
+		cmd_work->ctrl = cmd_ctrl->ctrl;
+		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+		break;
+	default:
+		DRM_ERROR("unsupported control type.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
+		cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+	return 0;
+}
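
A user-space sketch of driving this state machine, assuming the
DRM_IOCTL_EXYNOS_IPP_CMD_CTRL wrapper that accompanies this series:

    struct drm_exynos_ipp_cmd_ctrl cmd_ctrl = {
            .prop_id        = prop_id,
            .ctrl           = IPP_CTRL_PLAY,
    };

    ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &cmd_ctrl);
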
+
+int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(
+		&exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(
+		&exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+	return blocking_notifier_call_chain(
+		&exynos_drm_ippnb_list, val, v);
+}
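
These wrappers let other exynos drm drivers observe ipp activity through
a standard blocking notifier chain. A sketch of a listener; the callback
and block names are illustrative, and the meaning of val/data is defined
by the ipp notifier users:

    static int example_ippnb_callback(struct notifier_block *nb,
                                    unsigned long val, void *data)
    {
            /* react to the notified ipp event here */
            return NOTIFY_DONE;
    }

    static struct notifier_block example_nb = {
            .notifier_call = example_ippnb_callback,
    };

    /* typically from a probe path: */
    exynos_drm_ippnb_register(&example_nb);
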
+
+static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
+		struct drm_exynos_ipp_property *property)
+{
+	struct exynos_drm_ipp_ops *ops = NULL;
+	bool swap = false;
+	int ret, i;
+
+	if (!property) {
+		DRM_ERROR("invalid property parameter.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+	/* reset h/w block */
+	if (ippdrv->reset &&
+	    ippdrv->reset(ippdrv->dev)) {
+		DRM_ERROR("failed to reset.\n");
+		return -EINVAL;
+	}
+
+	/* set source,destination operations */
+	for_each_ipp_ops(i) {
+		struct drm_exynos_ipp_config *config =
+			&property->config[i];
+
+		ops = ippdrv->ops[i];
+		if (!ops || !config) {
+			DRM_ERROR("not support ops and config.\n");
+			return -EINVAL;
+		}
+
+		/* set format */
+		if (ops->set_fmt) {
+			ret = ops->set_fmt(ippdrv->dev, config->fmt);
+			if (ret) {
+				DRM_ERROR("not support format.\n");
+				return ret;
+			}
+		}
+
+		/* set transform for rotation, flip */
+		if (ops->set_transf) {
+			ret = ops->set_transf(ippdrv->dev, config->degree,
+				config->flip, &swap);
+			if (ret) {
+				DRM_ERROR("transform not supported.\n");
+				return -EINVAL;
+			}
+		}
+
+		/* set size */
+		if (ops->set_size) {
+			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
+				&config->sz);
+			if (ret) {
+				DRM_ERROR("not support size.\n");
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
+		struct drm_exynos_ipp_cmd_node *c_node)
+{
+	struct drm_exynos_ipp_mem_node *m_node;
+	struct drm_exynos_ipp_property *property = &c_node->property;
+	struct list_head *head;
+	int ret, i;
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+	/* store command info in ippdrv */
+	ippdrv->cmd = c_node;
+
+	if (!ipp_check_mem_list(c_node)) {
+		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* set current property in ippdrv */
+	ret = ipp_set_property(ippdrv, property);
+	if (ret) {
+		DRM_ERROR("failed to set property.\n");
+		ippdrv->cmd = NULL;
+		return ret;
+	}
+
+	/* check command */
+	switch (property->cmd) {
+	case IPP_CMD_M2M:
+		for_each_ipp_ops(i) {
+			/* source/destination memory list */
+			head = &c_node->mem_list[i];
+
+			m_node = list_first_entry(head,
+				struct drm_exynos_ipp_mem_node, list);
+			if (!m_node) {
+				DRM_ERROR("failed to get node.\n");
+				ret = -EFAULT;
+				return ret;
+			}
+
+			DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
+				__func__, (int)m_node);
+
+			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+			if (ret) {
+				DRM_ERROR("failed to set m node.\n");
+				return ret;
+			}
+		}
+		break;
+	case IPP_CMD_WB:
+		/* destination memory list */
+		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+		list_for_each_entry(m_node, head, list) {
+			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+			if (ret) {
+				DRM_ERROR("failed to set m node.\n");
+				return ret;
+			}
+		}
+		break;
+	case IPP_CMD_OUTPUT:
+		/* source memory list */
+		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+		list_for_each_entry(m_node, head, list) {
+			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+			if (ret) {
+				DRM_ERROR("failed to set m node.\n");
+				return ret;
+			}
+		}
+		break;
+	default:
+		DRM_ERROR("invalid operations.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);
+
+	/* start operations */
+	if (ippdrv->start) {
+		ret = ippdrv->start(ippdrv->dev, property->cmd);
+		if (ret) {
+			DRM_ERROR("failed to start ops.\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ipp_stop_property(struct drm_device *drm_dev,
+		struct exynos_drm_ippdrv *ippdrv,
+		struct drm_exynos_ipp_cmd_node *c_node)
+{
+	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+	struct drm_exynos_ipp_property *property = &c_node->property;
+	struct list_head *head;
+	int ret = 0, i;
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+	/* put event */
+	ipp_put_event(c_node, NULL);
+
+	/* check command */
+	switch (property->cmd) {
+	case IPP_CMD_M2M:
+		for_each_ipp_ops(i) {
+			/* source/destination memory list */
+			head = &c_node->mem_list[i];
+
+			if (list_empty(head)) {
+				DRM_DEBUG_KMS("%s:mem_list is empty.\n",
+					__func__);
+				break;
+			}
+
+			list_for_each_entry_safe(m_node, tm_node,
+				head, list) {
+				ret = ipp_put_mem_node(drm_dev, c_node,
+					m_node);
+				if (ret) {
+					DRM_ERROR("failed to put m_node.\n");
+					goto err_clear;
+				}
+			}
+		}
+		break;
+	case IPP_CMD_WB:
+		/* destination memory list */
+		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+		if (list_empty(head)) {
+			DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+			break;
+		}
+
+		list_for_each_entry_safe(m_node, tm_node, head, list) {
+			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+			if (ret) {
+				DRM_ERROR("failed to put m_node.\n");
+				goto err_clear;
+			}
+		}
+		break;
+	case IPP_CMD_OUTPUT:
+		/* source memory list */
+		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+		if (list_empty(head)) {
+			DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+			break;
+		}
+
+		list_for_each_entry_safe(m_node, tm_node, head, list) {
+			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+			if (ret) {
+				DRM_ERROR("failed to put m_node.\n");
+				goto err_clear;
+			}
+		}
+		break;
+	default:
+		DRM_ERROR("invalid operations.\n");
+		ret = -EINVAL;
+		goto err_clear;
+	}
+
+err_clear:
+	/* stop operations */
+	if (ippdrv->stop)
+		ippdrv->stop(ippdrv->dev, property->cmd);
+
+	return ret;
+}
+
+void ipp_sched_cmd(struct work_struct *work)
+{
+	struct drm_exynos_ipp_cmd_work *cmd_work =
+		(struct drm_exynos_ipp_cmd_work *)work;
+	struct exynos_drm_ippdrv *ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	struct drm_exynos_ipp_property *property;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	ippdrv = cmd_work->ippdrv;
+	if (!ippdrv) {
+		DRM_ERROR("invalid ippdrv list.\n");
+		return;
+	}
+
+	c_node = cmd_work->c_node;
+	if (!c_node) {
+		DRM_ERROR("invalid command node list.\n");
+		return;
+	}
+
+	mutex_lock(&c_node->cmd_lock);
+
+	property = &c_node->property;
+	if (!property) {
+		DRM_ERROR("failed to get property:prop_id[%d]\n",
+			c_node->property.prop_id);
+		goto err_unlock;
+	}
+
+	switch (cmd_work->ctrl) {
+	case IPP_CTRL_PLAY:
+	case IPP_CTRL_RESUME:
+		ret = ipp_start_property(ippdrv, c_node);
+		if (ret) {
+			DRM_ERROR("failed to start property:prop_id[%d]\n",
+				c_node->property.prop_id);
+			goto err_unlock;
+		}
+
+		/*
+		 * The M2M case waits for completion of the transfer:
+		 * M2M performs a single unit of operation over multiple
+		 * queued buffers, so it needs to wait until the data
+		 * transfer has completed.
+		 */
+		if (ipp_is_m2m_cmd(property->cmd)) {
+			if (!wait_for_completion_timeout
+			    (&c_node->start_complete, msecs_to_jiffies(200))) {
+				DRM_ERROR("timeout event:prop_id[%d]\n",
+					c_node->property.prop_id);
+				goto err_unlock;
+			}
+		}
+		break;
+	case IPP_CTRL_STOP:
+	case IPP_CTRL_PAUSE:
+		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
+			c_node);
+		if (ret) {
+			DRM_ERROR("failed to stop property.\n");
+			goto err_unlock;
+		}
+
+		complete(&c_node->stop_complete);
+		break;
+	default:
+		DRM_ERROR("unknown control type\n");
+		break;
+	}
+
+	DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);
+
+err_unlock:
+	mutex_unlock(&c_node->cmd_lock);
+}
+
+static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
+		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
+{
+	struct drm_device *drm_dev = ippdrv->drm_dev;
+	struct drm_exynos_ipp_property *property = &c_node->property;
+	struct drm_exynos_ipp_mem_node *m_node;
+	struct drm_exynos_ipp_queue_buf qbuf;
+	struct drm_exynos_ipp_send_event *e;
+	struct list_head *head;
+	struct timeval now;
+	unsigned long flags;
+	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
+	int ret, i;
+
+	for_each_ipp_ops(i)
+		DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+			i ? "dst" : "src", buf_id[i]);
+
+	if (!drm_dev) {
+		DRM_ERROR("failed to get drm_dev.\n");
+		return -EINVAL;
+	}
+
+	if (!property) {
+		DRM_ERROR("failed to get property.\n");
+		return -EINVAL;
+	}
+
+	if (list_empty(&c_node->event_list)) {
+		DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
+		return 0;
+	}
+
+	if (!ipp_check_mem_list(c_node)) {
+		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+		return 0;
+	}
+
+	/* check command */
+	switch (property->cmd) {
+	case IPP_CMD_M2M:
+		for_each_ipp_ops(i) {
+			/* source/destination memory list */
+			head = &c_node->mem_list[i];
+
+			m_node = list_first_entry(head,
+				struct drm_exynos_ipp_mem_node, list);
+			if (!m_node) {
+				DRM_ERROR("empty memory node.\n");
+				return -ENOMEM;
+			}
+
+			tbuf_id[i] = m_node->buf_id;
+			DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+				i ? "dst" : "src", tbuf_id[i]);
+
+			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+			if (ret)
+				DRM_ERROR("failed to put m_node.\n");
+		}
+		break;
+	case IPP_CMD_WB:
+		/* clear buf for finding */
+		memset(&qbuf, 0x0, sizeof(qbuf));
+		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
+		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
+
+		/* get memory node entry */
+		m_node = ipp_find_mem_node(c_node, &qbuf);
+		if (!m_node) {
+			DRM_ERROR("empty memory node.\n");
+			return -ENOMEM;
+		}
+
+		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
+
+		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+		if (ret)
+			DRM_ERROR("failed to put m_node.\n");
+		break;
+	case IPP_CMD_OUTPUT:
+		/* source memory list */
+		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+		m_node = list_first_entry(head,
+			struct drm_exynos_ipp_mem_node, list);
+		if (!m_node) {
+			DRM_ERROR("empty memory node.\n");
+			return -ENOMEM;
+		}
+
+		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
+
+		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+		if (ret)
+			DRM_ERROR("failed to put m_node.\n");
+		break;
+	default:
+		DRM_ERROR("invalid operations.\n");
+		return -EINVAL;
+	}
+
+	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
+		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
+			tbuf_id[1], buf_id[1], property->prop_id);
+
+	/*
+	 * The command node holds the event list for destination buffers.
+	 * When a destination buffer is enqueued to the mem list, an event
+	 * is created and linked to the tail of the event list, so the
+	 * first event corresponds to the first enqueued buffer.
+	 */
+	e = list_first_entry(&c_node->event_list,
+		struct drm_exynos_ipp_send_event, base.link);
+
+	if (!e) {
+		DRM_ERROR("empty event.\n");
+		return -EINVAL;
+	}
+
+	do_gettimeofday(&now);
+	DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n",
+		__func__, now.tv_sec, now.tv_usec);
+	e->event.tv_sec = now.tv_sec;
+	e->event.tv_usec = now.tv_usec;
+	e->event.prop_id = property->prop_id;
+
+	/* set buffer id about source destination */
+	for_each_ipp_ops(i)
+		e->event.buf_id[i] = tbuf_id[i];
+
+	spin_lock_irqsave(&drm_dev->event_lock, flags);
+	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+	wake_up_interruptible(&e->base.file_priv->event_wait);
+	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+	DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
+		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
+
+	return 0;
+}
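
On the user-space side the completion arrives through the standard drm
event stream on the device fd. A sketch of the usual read loop, matching
the DRM_EXYNOS_IPP_EVENT type filled in above:

    char buffer[1024];
    struct drm_event *e;
    int i, len;

    len = read(fd, buffer, sizeof(buffer));
    for (i = 0; i < len; i += e->length) {
            e = (struct drm_event *)&buffer[i];
            if (e->type == DRM_EXYNOS_IPP_EVENT) {
                    struct drm_exynos_ipp_event *ipp_ev = (void *)e;

                    /* buf_id[EXYNOS_DRM_OPS_DST] names the finished buffer */
            }
    }
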
+
+void ipp_sched_event(struct work_struct *work)
+{
+	struct drm_exynos_ipp_event_work *event_work =
+		(struct drm_exynos_ipp_event_work *)work;
+	struct exynos_drm_ippdrv *ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	int ret;
+
+	if (!event_work) {
+		DRM_ERROR("failed to get event_work.\n");
+		return;
+	}
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
+		event_work->buf_id[EXYNOS_DRM_OPS_DST]);
+
+	ippdrv = event_work->ippdrv;
+	if (!ippdrv) {
+		DRM_ERROR("failed to get ipp driver.\n");
+		return;
+	}
+
+	c_node = ippdrv->cmd;
+	if (!c_node) {
+		DRM_ERROR("failed to get command node.\n");
+		return;
+	}
+
+	/*
+	 * IPP synchronizes the command thread and the event thread.
+	 * If IPP is closed immediately from user land, it synchronizes
+	 * with the command thread by completing the event and bypassing
+	 * the operations below.
+	 */
+	if (c_node->state != IPP_STATE_START) {
+		DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
+			__func__, c_node->state, c_node->property.prop_id);
+		goto err_completion;
+	}
+
+	mutex_lock(&c_node->event_lock);
+
+	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
+	if (ret) {
+		DRM_ERROR("failed to send event.\n");
+		goto err_completion;
+	}
+
+err_completion:
+	if (ipp_is_m2m_cmd(c_node->property.cmd))
+		complete(&c_node->start_complete);
+
+	mutex_unlock(&c_node->event_lock);
+}
+
+static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+	struct ipp_context *ctx = get_ipp_context(dev);
+	struct exynos_drm_ippdrv *ippdrv;
+	int ret, count = 0;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* get ipp driver entry */
+	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+		ippdrv->drm_dev = drm_dev;
+
+		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
+			&ippdrv->ipp_id);
+		if (ret) {
+			DRM_ERROR("failed to create id.\n");
+			goto err_idr;
+		}
+
+		DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
+			count++, (int)ippdrv, ippdrv->ipp_id);
+
+		if (ippdrv->ipp_id == 0) {
+			DRM_ERROR("failed to get ipp_id[%d]\n",
+				ippdrv->ipp_id);
+			ret = -EINVAL;
+			goto err_idr;
+		}
+
+		/* store parent device for node */
+		ippdrv->parent_dev = dev;
+
+		/* store event work queue and handler */
+		ippdrv->event_workq = ctx->event_workq;
+		ippdrv->sched_event = ipp_sched_event;
+		INIT_LIST_HEAD(&ippdrv->cmd_list);
+
+		if (is_drm_iommu_supported(drm_dev)) {
+			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
+			if (ret) {
+				DRM_ERROR("failed to activate iommu\n");
+				goto err_iommu;
+			}
+		}
+	}
+
+	return 0;
+
+err_iommu:
+	/* get ipp driver entry */
+	list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+		if (is_drm_iommu_supported(drm_dev))
+			drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+err_idr:
+	idr_remove_all(&ctx->ipp_idr);
+	idr_remove_all(&ctx->prop_idr);
+	idr_destroy(&ctx->ipp_idr);
+	idr_destroy(&ctx->prop_idr);
+	return ret;
+}
+
+static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+	struct exynos_drm_ippdrv *ippdrv;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* get ipp driver entry */
+	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+		if (is_drm_iommu_supported(drm_dev))
+			drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+		ippdrv->drm_dev = NULL;
+		exynos_drm_ippdrv_unregister(ippdrv);
+	}
+}
+
+static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
+		struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_ipp_private *priv;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		DRM_ERROR("failed to allocate priv.\n");
+		return -ENOMEM;
+	}
+	priv->dev = dev;
+	file_priv->ipp_priv = priv;
+
+	INIT_LIST_HEAD(&priv->event_list);
+
+	DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);
+
+	return 0;
+}
+
+static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
+		struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+	struct exynos_drm_ippdrv *ippdrv = NULL;
+	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
+	int count = 0;
+
+	DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);
+
+	if (list_empty(&exynos_drm_ippdrv_list)) {
+		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+		goto err_clear;
+	}
+
+	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+		if (list_empty(&ippdrv->cmd_list))
+			continue;
+
+		list_for_each_entry_safe(c_node, tc_node,
+			&ippdrv->cmd_list, list) {
+			DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
+				__func__, count++, (int)ippdrv);
+
+			if (c_node->priv == priv) {
+				/*
+				 * Userland reached an abnormal state (e.g.
+				 * the process was killed) and the file was
+				 * closed without a stop command, so stop
+				 * the operation here.
+				 */
+				if (c_node->state == IPP_STATE_START) {
+					ipp_stop_property(drm_dev, ippdrv,
+						c_node);
+					c_node->state = IPP_STATE_STOP;
+				}
+
+				ippdrv->dedicated = false;
+				ipp_clean_cmd_node(c_node);
+				if (list_empty(&ippdrv->cmd_list))
+					pm_runtime_put_sync(ippdrv->dev);
+			}
+		}
+	}
+
+err_clear:
+	kfree(priv);
+	return;
+}
+
+static int __devinit ipp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ipp_context *ctx;
+	struct exynos_drm_subdrv *subdrv;
+	int ret;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	mutex_init(&ctx->ipp_lock);
+	mutex_init(&ctx->prop_lock);
+
+	idr_init(&ctx->ipp_idr);
+	idr_init(&ctx->prop_idr);
+
+	/*
+	 * Create a single-threaded workqueue for IPP events.
+	 * IPP drivers queue event_work here, and the event thread
+	 * delivers the resulting events to the user process.
+	 */
+	ctx->event_workq = create_singlethread_workqueue("ipp_event");
+	if (!ctx->event_workq) {
+		dev_err(dev, "failed to create event workqueue\n");
+		ret = -EINVAL;
+		goto err_clear;
+	}
+
+	/*
+	 * Create a single-threaded workqueue for IPP commands.
+	 * A user process creates a command node with the set-property
+	 * ioctl and queues start_work here; the command thread then
+	 * starts the property.
+	 */
+	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
+	if (!ctx->cmd_workq) {
+		dev_err(dev, "failed to create cmd workqueue\n");
+		ret = -EINVAL;
+		goto err_event_workq;
+	}
+
+	/* set sub driver information */
+	subdrv = &ctx->subdrv;
+	subdrv->dev = dev;
+	subdrv->probe = ipp_subdrv_probe;
+	subdrv->remove = ipp_subdrv_remove;
+	subdrv->open = ipp_subdrv_open;
+	subdrv->close = ipp_subdrv_close;
+
+	platform_set_drvdata(pdev, ctx);
+
+	ret = exynos_drm_subdrv_register(subdrv);
+	if (ret < 0) {
+		DRM_ERROR("failed to register drm ipp device.\n");
+		goto err_cmd_workq;
+	}
+
+	dev_info(&pdev->dev, "drm ipp registered successfully.\n");
+
+	return 0;
+
+err_cmd_workq:
+	destroy_workqueue(ctx->cmd_workq);
+err_event_workq:
+	destroy_workqueue(ctx->event_workq);
+err_clear:
+	kfree(ctx);
+	return ret;
+}
+
+static int __devexit ipp_remove(struct platform_device *pdev)
+{
+	struct ipp_context *ctx = platform_get_drvdata(pdev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* unregister sub driver */
+	exynos_drm_subdrv_unregister(&ctx->subdrv);
+
+	/* remove all entries and destroy the ipp/prop idrs */
+	idr_remove_all(&ctx->ipp_idr);
+	idr_remove_all(&ctx->prop_idr);
+	idr_destroy(&ctx->ipp_idr);
+	idr_destroy(&ctx->prop_idr);
+
+	mutex_destroy(&ctx->ipp_lock);
+	mutex_destroy(&ctx->prop_lock);
+
+	/* destroy command, event work queue */
+	destroy_workqueue(ctx->cmd_workq);
+	destroy_workqueue(ctx->event_workq);
+
+	kfree(ctx);
+
+	return 0;
+}
+
+static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
+{
+	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
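+	/*
+	 * Currently a no-op: power control is left to the individual
+	 * ippdrv (e.g. the rotator gates its clock in rotator_clk_ctrl()).
+	 */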
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ipp_suspend(struct device *dev)
+{
+	struct ipp_context *ctx = get_ipp_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_resume(struct device *dev)
+{
+	struct ipp_context *ctx = get_ipp_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!pm_runtime_suspended(dev))
+		return ipp_power_ctrl(ctx, true);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int ipp_runtime_suspend(struct device *dev)
+{
+	struct ipp_context *ctx = get_ipp_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_runtime_resume(struct device *dev)
+{
+	struct ipp_context *ctx = get_ipp_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	return ipp_power_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops ipp_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
+	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
+};
+
+struct platform_driver ipp_driver = {
+	.probe		= ipp_probe,
+	.remove		= __devexit_p(ipp_remove),
+	.driver		= {
+		.name	= "exynos-drm-ipp",
+		.owner	= THIS_MODULE,
+		.pm	= &ipp_pm_ops,
+	},
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
new file mode 100644
index 000000000000..28ffac95386c
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *	Eunchul Kim <chulspro.kim@samsung.com>
+ *	Jinyoung Jeon <jy0.jeon@samsung.com>
+ *	Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_IPP_H_
+#define _EXYNOS_DRM_IPP_H_
+
+#define for_each_ipp_ops(pos)	\
+	for (pos = 0; pos < EXYNOS_DRM_OPS_MAX; pos++)
+#define for_each_ipp_planar(pos)	\
+	for (pos = 0; pos < EXYNOS_DRM_PLANAR_MAX; pos++)
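+
+/*
+ * Usage sketch: iterate over the Y/Cb/Cr planes of a buffer, e.g.
+ *
+ *	for_each_ipp_planar(i)
+ *		addr[i] = buf_info->base[i];
+ */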
+
+#define IPP_GET_LCD_WIDTH	_IOR('F', 302, int)
+#define IPP_GET_LCD_HEIGHT	_IOR('F', 303, int)
+#define IPP_SET_WRITEBACK	_IOW('F', 304, u32)
+
+/* definition of state */
+enum drm_exynos_ipp_state {
+	IPP_STATE_IDLE,
+	IPP_STATE_START,
+	IPP_STATE_STOP,
+};
+
+/*
+ * A structure of command work information.
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @c_node: command node information.
+ * @ctrl: command control.
+ */
+struct drm_exynos_ipp_cmd_work {
+	struct work_struct	work;
+	struct exynos_drm_ippdrv	*ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	enum drm_exynos_ipp_ctrl	ctrl;
+};
+
+/*
+ * A structure of command node.
+ *
+ * @priv: IPP private information.
+ * @list: list head for the command queue.
+ * @event_list: list head for events.
+ * @mem_list: list heads for the source/destination memory queues.
+ * @cmd_lock: lock for synchronization of access to ioctl.
+ * @mem_lock: lock for synchronization of access to memory nodes.
+ * @event_lock: lock for synchronization of access to scheduled event.
+ * @start_complete: completion of start of command.
+ * @stop_complete: completion of stop of command.
+ * @property: property information.
+ * @start_work: start command work structure.
+ * @stop_work: stop command work structure.
+ * @event_work: event work structure.
+ * @state: state of command node.
+ */
+struct drm_exynos_ipp_cmd_node {
+	struct exynos_drm_ipp_private *priv;
+	struct list_head	list;
+	struct list_head	event_list;
+	struct list_head	mem_list[EXYNOS_DRM_OPS_MAX];
+	struct mutex	cmd_lock;
+	struct mutex	mem_lock;
+	struct mutex	event_lock;
+	struct completion	start_complete;
+	struct completion	stop_complete;
+	struct drm_exynos_ipp_property	property;
+	struct drm_exynos_ipp_cmd_work *start_work;
+	struct drm_exynos_ipp_cmd_work *stop_work;
+	struct drm_exynos_ipp_event_work *event_work;
+	enum drm_exynos_ipp_state	state;
+};
+
+/*
+ * A structure of buffer information.
+ *
+ * @handles: GEM handle of each Y, Cb, Cr plane.
+ * @base: DMA address of each Y, Cb, Cr plane.
+ */
+struct drm_exynos_ipp_buf_info {
+	unsigned long	handles[EXYNOS_DRM_PLANAR_MAX];
+	dma_addr_t	base[EXYNOS_DRM_PLANAR_MAX];
+};
+
+/*
+ * A structure of writeback (wb) setting information.
+ *
+ * @enable: enable flag for wb.
+ * @refresh: HZ of the refresh rate.
+ */
+struct drm_exynos_ipp_set_wb {
+	__u32	enable;
+	__u32	refresh;
+};
+
+/*
+ * A structure of event work information.
+ *
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @buf_id: ids of the src/dst buffers.
+ */
+struct drm_exynos_ipp_event_work {
+	struct work_struct	work;
+	struct exynos_drm_ippdrv *ippdrv;
+	u32	buf_id[EXYNOS_DRM_OPS_MAX];
+};
+
+/*
+ * A structure of source/destination operations.
+ *
+ * @set_fmt: set the image format.
+ * @set_transf: set the transform (rotation, flip).
+ * @set_size: set size of region.
+ * @set_addr: set address for dma.
+ */
+struct exynos_drm_ipp_ops {
+	int (*set_fmt)(struct device *dev, u32 fmt);
+	int (*set_transf)(struct device *dev,
+		enum drm_exynos_degree degree,
+		enum drm_exynos_flip flip, bool *swap);
+	int (*set_size)(struct device *dev, int swap,
+		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz);
+	int (*set_addr)(struct device *dev,
+		 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+		enum drm_exynos_ipp_buf_type buf_type);
+};
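+
+/*
+ * Drivers provide one ops table per direction; unsupported callbacks are
+ * left NULL (e.g. the rotator's rot_src_ops has no set_transf).
+ */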
+
+/*
+ * A structure of ipp driver.
+ *
+ * @drv_list: list head for registered sub driver information.
+ * @parent_dev: parent device information.
+ * @dev: platform device.
+ * @drm_dev: drm device.
+ * @ipp_id: id of ipp driver.
+ * @dedicated: dedicated ipp device.
+ * @ops: source, destination operations.
+ * @event_workq: event work queue.
+ * @cmd: current command information.
+ * @cmd_list: list head for command information.
+ * @prop_list: property information of the current ipp driver.
+ * @check_property: check a property's format, size and buffers.
+ * @reset: reset the ipp block.
+ * @start: start the device for the given command.
+ * @stop: stop the device for the given command.
+ * @sched_event: work schedule handler.
+ */
+struct exynos_drm_ippdrv {
+	struct list_head	drv_list;
+	struct device	*parent_dev;
+	struct device	*dev;
+	struct drm_device	*drm_dev;
+	u32	ipp_id;
+	bool	dedicated;
+	struct exynos_drm_ipp_ops	*ops[EXYNOS_DRM_OPS_MAX];
+	struct workqueue_struct	*event_workq;
+	struct drm_exynos_ipp_cmd_node *cmd;
+	struct list_head	cmd_list;
+	struct drm_exynos_ipp_prop_list *prop_list;
+
+	int (*check_property)(struct device *dev,
+		struct drm_exynos_ipp_property *property);
+	int (*reset)(struct device *dev);
+	int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+	void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+	void (*sched_event)(struct work_struct *work);
+};
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+					 struct drm_file *file);
+extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+					 struct drm_file *file);
+extern int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+					 struct drm_file *file);
+extern int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+					 struct drm_file *file);
+extern int exynos_drm_ippnb_register(struct notifier_block *nb);
+extern int exynos_drm_ippnb_unregister(struct notifier_block *nb);
+extern int exynos_drm_ippnb_send_event(unsigned long val, void *v);
+extern void ipp_sched_cmd(struct work_struct *work);
+extern void ipp_sched_event(struct work_struct *work);
+
+#else
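+/*
+ * Stubs for builds without CONFIG_DRM_EXYNOS_IPP: registration helpers
+ * fail with -ENODEV, while the unimplemented ioctls return -ENOTTY.
+ */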
+static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+	return -ENODEV;
+}
+
+static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+	return -ENODEV;
+}
+
+static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev,
+						void *data,
+						struct drm_file *file_priv)
+{
+	return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev,
+						void *data,
+						struct drm_file *file_priv)
+{
+	return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev,
+						void *data,
+						struct drm_file *file)
+{
+	return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev,
+						void *data,
+						struct drm_file *file)
+{
+	return -ENOTTY;
+}
+
+static inline int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+	return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+	return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+	return -ENOTTY;
+}
+#endif
+
+#endif /* _EXYNOS_DRM_IPP_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 862ca1eb2102..83efc662d65a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -40,7 +40,7 @@ static const uint32_t formats[] = {
  * CRTC ----------------
  *      ^ start        ^ end
  *
- * There are six cases from a to b.
+ * There are six cases from a to f.
  *
  *             <----- SCREEN ----->
  *             0                 last
@@ -93,11 +93,9 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
 		}
 
 		overlay->dma_addr[i] = buffer->dma_addr;
-		overlay->vaddr[i] = buffer->kvaddr;
 
-		DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
-				i, (unsigned long)overlay->vaddr[i],
-				(unsigned long)overlay->dma_addr[i]);
+		DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
+				i, (unsigned long)overlay->dma_addr[i]);
 	}
 
 	actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay);
@@ -106,16 +104,12 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
 	if (crtc_x < 0) {
 		if (actual_w)
 			src_x -= crtc_x;
-		else
-			src_x += crtc_w;
 		crtc_x = 0;
 	}
 
 	if (crtc_y < 0) {
 		if (actual_h)
 			src_y -= crtc_y;
-		else
-			src_y += crtc_h;
 		crtc_y = 0;
 	}
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
new file mode 100644
index 000000000000..1c2366083c70
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -0,0 +1,855 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *	YoungJun Cho <yj44.cho@samsung.com>
+ *	Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-rotator.h"
+#include "exynos_drm.h"
+#include "exynos_drm_ipp.h"
+
+/*
+ * The rotator supports image crop/rotation and input/output DMA operations:
+ * the input DMA reads image data from memory and the output DMA writes
+ * image data back to memory.
+ *
+ * M2M operation: crop, rotation and so on.
+ * Memory ----> Rotator H/W ----> Memory.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume APIs if needed.
+ * 2. check the use case for platform_device_id.
+ * 3. check src/dst sizes (width, height).
+ * 4. add a supported-format list to prop_list.
+ */
+
+#define get_rot_context(dev)	platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv)	container_of(ippdrv,\
+					struct rot_context, ippdrv)
+#define rot_read(offset)		readl(rot->regs + (offset))
+#define rot_write(cfg, offset)	writel(cfg, rot->regs + (offset))
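+/* e.g. rot_write(rot_read(ROT_CONTROL) | ROT_CONTROL_START, ROT_CONTROL); */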
+
+enum rot_irq_status {
+	ROT_IRQ_STATUS_COMPLETE	= 8,
+	ROT_IRQ_STATUS_ILLEGAL	= 9,
+};
+
+/*
+ * A structure of limitation.
+ *
+ * @min_w: minimum width.
+ * @min_h: minimum height.
+ * @max_w: maximum width.
+ * @max_h: maximum height.
+ * @align: align size.
+ */
+struct rot_limit {
+	u32	min_w;
+	u32	min_h;
+	u32	max_w;
+	u32	max_h;
+	u32	align;
+};
+
+/*
+ * A structure of limitation table.
+ *
+ * @ycbcr420_2p: limits for the 2-plane YCbCr 4:2:0 (NV12) case.
+ * @rgb888: limits for the RGB888 case.
+ */
+struct rot_limit_table {
+	struct rot_limit	ycbcr420_2p;
+	struct rot_limit	rgb888;
+};
+
+/*
+ * A structure of rotator context.
+ *
+ * @ippdrv: the embedded ipp driver instance.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @clock: rotator gate clock.
+ * @limit_tbl: limitation of rotator.
+ * @irq: irq number.
+ * @cur_buf_id: current operation buffer id.
+ * @suspended: suspended state.
+ */
+struct rot_context {
+	struct exynos_drm_ippdrv	ippdrv;
+	struct resource	*regs_res;
+	void __iomem	*regs;
+	struct clk	*clock;
+	struct rot_limit_table	*limit_tbl;
+	int	irq;
+	int	cur_buf_id[EXYNOS_DRM_OPS_MAX];
+	bool	suspended;
+};
+
+static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
+{
+	u32 val = rot_read(ROT_CONFIG);
+
+	if (enable)
+		val |= ROT_CONFIG_IRQ;
+	else
+		val &= ~ROT_CONFIG_IRQ;
+
+	rot_write(val, ROT_CONFIG);
+}
+
+static u32 rotator_reg_get_fmt(struct rot_context *rot)
+{
+	u32 val = rot_read(ROT_CONTROL);
+
+	val &= ROT_CONTROL_FMT_MASK;
+
+	return val;
+}
+
+static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
+{
+	u32 val = rot_read(ROT_STATUS);
+
+	val = ROT_STATUS_IRQ(val);
+
+	if (val == ROT_STATUS_IRQ_VAL_COMPLETE)
+		return ROT_IRQ_STATUS_COMPLETE;
+
+	return ROT_IRQ_STATUS_ILLEGAL;
+}
+
+static irqreturn_t rotator_irq_handler(int irq, void *arg)
+{
+	struct rot_context *rot = arg;
+	struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+	struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
+	enum rot_irq_status irq_status;
+	u32 val;
+
+	/* Get execution result */
+	irq_status = rotator_reg_get_irq_status(rot);
+
+	/* clear status */
+	val = rot_read(ROT_STATUS);
+	val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
+	rot_write(val, ROT_STATUS);
+
+	if (irq_status == ROT_IRQ_STATUS_COMPLETE) {
+		event_work->ippdrv = ippdrv;
+		event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+			rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
+		queue_work(ippdrv->event_workq,
+			(struct work_struct *)event_work);
+	} else {
+		DRM_ERROR("the SFR is set illegally\n");
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize,
+		u32 *vsize)
+{
+	struct rot_limit_table *limit_tbl = rot->limit_tbl;
+	struct rot_limit *limit;
+	u32 mask, val;
+
+	/* Get size limit */
+	if (fmt == ROT_CONTROL_FMT_RGB888)
+		limit = &limit_tbl->rgb888;
+	else
+		limit = &limit_tbl->ycbcr420_2p;
+
+	/* Get mask for rounding to nearest aligned val */
+	mask = ~((1 << limit->align) - 1);
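+	/* e.g. align = 3 -> mask = ~0x7, i.e. multiples of 8 */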
+
+	/* Set aligned width */
+	val = ROT_ALIGN(*hsize, limit->align, mask);
+	if (val < limit->min_w)
+		*hsize = ROT_MIN(limit->min_w, mask);
+	else if (val > limit->max_w)
+		*hsize = ROT_MAX(limit->max_w, mask);
+	else
+		*hsize = val;
+
+	/* Set aligned height */
+	val = ROT_ALIGN(*vsize, limit->align, mask);
+	if (val < limit->min_h)
+		*vsize = ROT_MIN(limit->min_h, mask);
+	else if (val > limit->max_h)
+		*vsize = ROT_MAX(limit->max_h, mask);
+	else
+		*vsize = val;
+}
+
+static int rotator_src_set_fmt(struct device *dev, u32 fmt)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+	u32 val;
+
+	val = rot_read(ROT_CONTROL);
+	val &= ~ROT_CONTROL_FMT_MASK;
+
+	switch (fmt) {
+	case DRM_FORMAT_NV12:
+		val |= ROT_CONTROL_FMT_YCBCR420_2P;
+		break;
+	case DRM_FORMAT_XRGB8888:
+		val |= ROT_CONTROL_FMT_RGB888;
+		break;
+	default:
+		DRM_ERROR("invalid image format\n");
+		return -EINVAL;
+	}
+
+	rot_write(val, ROT_CONTROL);
+
+	return 0;
+}
+
+static inline bool rotator_check_reg_fmt(u32 fmt)
+{
+	if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) ||
+	    (fmt == ROT_CONTROL_FMT_RGB888))
+		return true;
+
+	return false;
+}
+
+static int rotator_src_set_size(struct device *dev, int swap,
+		struct drm_exynos_pos *pos,
+		struct drm_exynos_sz *sz)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+	u32 fmt, hsize, vsize;
+	u32 val;
+
+	/* Get format */
+	fmt = rotator_reg_get_fmt(rot);
+	if (!rotator_check_reg_fmt(fmt)) {
+		DRM_ERROR("%s:invalid format.\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Align buffer size */
+	hsize = sz->hsize;
+	vsize = sz->vsize;
+	rotator_align_size(rot, fmt, &hsize, &vsize);
+
+	/* Set buffer size configuration */
+	val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+	rot_write(val, ROT_SRC_BUF_SIZE);
+
+	/* Set crop image position configuration */
+	val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+	rot_write(val, ROT_SRC_CROP_POS);
+	val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w);
+	rot_write(val, ROT_SRC_CROP_SIZE);
+
+	return 0;
+}
+
+static int rotator_src_set_addr(struct device *dev,
+		struct drm_exynos_ipp_buf_info *buf_info,
+		u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+	dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+	u32 val, fmt, hsize, vsize;
+	int i;
+
+	/* Set current buf_id */
+	rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;
+
+	switch (buf_type) {
+	case IPP_BUF_ENQUEUE:
+		/* Set address configuration */
+		for_each_ipp_planar(i)
+			addr[i] = buf_info->base[i];
+
+		/* Get format */
+		fmt = rotator_reg_get_fmt(rot);
+		if (!rotator_check_reg_fmt(fmt)) {
+			DRM_ERROR("%s:invalid format.\n", __func__);
+			return -EINVAL;
+		}
+
+		/* Re-set cb planar for NV12 format */
+		if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+		    !addr[EXYNOS_DRM_PLANAR_CB]) {
+			val = rot_read(ROT_SRC_BUF_SIZE);
+			hsize = ROT_GET_BUF_SIZE_W(val);
+			vsize = ROT_GET_BUF_SIZE_H(val);
+
+			/* Set cb planar: the CbCr plane follows the Y plane */
+			addr[EXYNOS_DRM_PLANAR_CB] =
+				addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+		}
+
+		for_each_ipp_planar(i)
+			rot_write(addr[i], ROT_SRC_BUF_ADDR(i));
+		break;
+	case IPP_BUF_DEQUEUE:
+		for_each_ipp_planar(i)
+			rot_write(0x0, ROT_SRC_BUF_ADDR(i));
+		break;
+	default:
+		/* Nothing to do */
+		break;
+	}
+
+	return 0;
+}
+
+static int rotator_dst_set_transf(struct device *dev,
+		enum drm_exynos_degree degree,
+		enum drm_exynos_flip flip, bool *swap)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+	u32 val;
+
+	/* Set transform configuration */
+	val = rot_read(ROT_CONTROL);
+	val &= ~ROT_CONTROL_FLIP_MASK;
+
+	switch (flip) {
+	case EXYNOS_DRM_FLIP_VERTICAL:
+		val |= ROT_CONTROL_FLIP_VERTICAL;
+		break;
+	case EXYNOS_DRM_FLIP_HORIZONTAL:
+		val |= ROT_CONTROL_FLIP_HORIZONTAL;
+		break;
+	default:
+		/* Flip None */
+		break;
+	}
+
+	val &= ~ROT_CONTROL_ROT_MASK;
+
+	switch (degree) {
+	case EXYNOS_DRM_DEGREE_90:
+		val |= ROT_CONTROL_ROT_90;
+		break;
+	case EXYNOS_DRM_DEGREE_180:
+		val |= ROT_CONTROL_ROT_180;
+		break;
+	case EXYNOS_DRM_DEGREE_270:
+		val |= ROT_CONTROL_ROT_270;
+		break;
+	default:
+		/* Rotation 0 Degree */
+		break;
+	}
+
+	rot_write(val, ROT_CONTROL);
+
+	/* Check degree for setting buffer size swap */
+	if ((degree == EXYNOS_DRM_DEGREE_90) ||
+	    (degree == EXYNOS_DRM_DEGREE_270))
+		*swap = true;
+	else
+		*swap = false;
+
+	return 0;
+}
+
+static int rotator_dst_set_size(struct device *dev, int swap,
+		struct drm_exynos_pos *pos,
+		struct drm_exynos_sz *sz)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+	u32 val, fmt, hsize, vsize;
+
+	/* Get format */
+	fmt = rotator_reg_get_fmt(rot);
+	if (!rotator_check_reg_fmt(fmt)) {
+		DRM_ERROR("%s:invalid format.\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Align buffer size */
+	hsize = sz->hsize;
+	vsize = sz->vsize;
+	rotator_align_size(rot, fmt, &hsize, &vsize);
+
+	/* Set buffer size configuration */
+	val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+	rot_write(val, ROT_DST_BUF_SIZE);
+
+	/* Set crop image position configuration */
+	val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+	rot_write(val, ROT_DST_CROP_POS);
+
+	return 0;
+}
+
+static int rotator_dst_set_addr(struct device *dev,
+		struct drm_exynos_ipp_buf_info *buf_info,
+		u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+	dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+	u32 val, fmt, hsize, vsize;
+	int i;
+
+	/* Set current buf_id */
+	rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+
+	switch (buf_type) {
+	case IPP_BUF_ENQUEUE:
+		/* Set address configuration */
+		for_each_ipp_planar(i)
+			addr[i] = buf_info->base[i];
+
+		/* Get format */
+		fmt = rotator_reg_get_fmt(rot);
+		if (!rotator_check_reg_fmt(fmt)) {
+			DRM_ERROR("%s:invalid format.\n", __func__);
+			return -EINVAL;
+		}
+
+		/* Re-set cb planar for NV12 format */
+		if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+		    !addr[EXYNOS_DRM_PLANAR_CB]) {
+			/* Get buf size */
+			val = rot_read(ROT_DST_BUF_SIZE);
+
+			hsize = ROT_GET_BUF_SIZE_W(val);
+			vsize = ROT_GET_BUF_SIZE_H(val);
+
+			/* Set cb planar: the CbCr plane follows the Y plane */
+			addr[EXYNOS_DRM_PLANAR_CB] =
+				addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+		}
+
+		for_each_ipp_planar(i)
+			rot_write(addr[i], ROT_DST_BUF_ADDR(i));
+		break;
+	case IPP_BUF_DEQUEUE:
+		for_each_ipp_planar(i)
+			rot_write(0x0, ROT_DST_BUF_ADDR(i));
+		break;
+	default:
+		/* Nothing to do */
+		break;
+	}
+
+	return 0;
+}
+
+static struct exynos_drm_ipp_ops rot_src_ops = {
+	.set_fmt	=	rotator_src_set_fmt,
+	.set_size	=	rotator_src_set_size,
+	.set_addr	=	rotator_src_set_addr,
+};
+
+static struct exynos_drm_ipp_ops rot_dst_ops = {
+	.set_transf	=	rotator_dst_set_transf,
+	.set_size	=	rotator_dst_set_size,
+	.set_addr	=	rotator_dst_set_addr,
+};
+
+static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+	struct drm_exynos_ipp_prop_list *prop_list;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+	if (!prop_list) {
+		DRM_ERROR("failed to alloc property list.\n");
+		return -ENOMEM;
+	}
+
+	prop_list->version = 1;
+	prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+				(1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+	prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+				(1 << EXYNOS_DRM_DEGREE_90) |
+				(1 << EXYNOS_DRM_DEGREE_180) |
+				(1 << EXYNOS_DRM_DEGREE_270);
+	prop_list->csc = 0;
+	prop_list->crop = 0;
+	prop_list->scale = 0;
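+	/* csc/scale stay 0: rotator_ippdrv_check_property() rejects both */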
+
+	ippdrv->prop_list = prop_list;
+
+	return 0;
+}
+
+static inline bool rotator_check_drm_fmt(u32 fmt)
+{
+	switch (fmt) {
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_NV12:
+		return true;
+	default:
+		DRM_DEBUG_KMS("%s:not support format\n", __func__);
+		return false;
+	}
+}
+
+static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
+{
+	switch (flip) {
+	case EXYNOS_DRM_FLIP_NONE:
+	case EXYNOS_DRM_FLIP_VERTICAL:
+	case EXYNOS_DRM_FLIP_HORIZONTAL:
+		return true;
+	default:
+		DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+		return false;
+	}
+}
+
+static int rotator_ippdrv_check_property(struct device *dev,
+		struct drm_exynos_ipp_property *property)
+{
+	struct drm_exynos_ipp_config *src_config =
+					&property->config[EXYNOS_DRM_OPS_SRC];
+	struct drm_exynos_ipp_config *dst_config =
+					&property->config[EXYNOS_DRM_OPS_DST];
+	struct drm_exynos_pos *src_pos = &src_config->pos;
+	struct drm_exynos_pos *dst_pos = &dst_config->pos;
+	struct drm_exynos_sz *src_sz = &src_config->sz;
+	struct drm_exynos_sz *dst_sz = &dst_config->sz;
+	bool swap = false;
+
+	/* Check format configuration */
+	if (src_config->fmt != dst_config->fmt) {
+		DRM_DEBUG_KMS("%s:not support csc feature\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!rotator_check_drm_fmt(dst_config->fmt)) {
+		DRM_DEBUG_KMS("%s:invalid format\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Check transform configuration */
+	if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
+		DRM_DEBUG_KMS("%s:not support source-side rotation\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	switch (dst_config->degree) {
+	case EXYNOS_DRM_DEGREE_90:
+	case EXYNOS_DRM_DEGREE_270:
+		swap = true;
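+		/* fall through */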
+	case EXYNOS_DRM_DEGREE_0:
+	case EXYNOS_DRM_DEGREE_180:
+		/* No problem */
+		break;
+	default:
+		DRM_DEBUG_KMS("%s:invalid degree\n", __func__);
+		return -EINVAL;
+	}
+
+	if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
+		DRM_DEBUG_KMS("%s:not support source-side flip\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!rotator_check_drm_flip(dst_config->flip)) {
+		DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Check size configuration */
+	if ((src_pos->x + src_pos->w > src_sz->hsize) ||
+		(src_pos->y + src_pos->h > src_sz->vsize)) {
+		DRM_DEBUG_KMS("%s:out of source buffer bound\n", __func__);
+		return -EINVAL;
+	}
+
+	if (swap) {
+		if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
+			(dst_pos->y + dst_pos->w > dst_sz->hsize)) {
+			DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+				__func__);
+			return -EINVAL;
+		}
+
+		if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
+			DRM_DEBUG_KMS("%s:not support scale feature\n",
+				__func__);
+			return -EINVAL;
+		}
+	} else {
+		if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
+			(dst_pos->y + dst_pos->h > dst_sz->vsize)) {
+			DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+				__func__);
+			return -EINVAL;
+		}
+
+		if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
+			DRM_DEBUG_KMS("%s:not support scale feature\n",
+				__func__);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+	u32 val;
+
+	if (rot->suspended) {
+		DRM_ERROR("suspended state\n");
+		return -EPERM;
+	}
+
+	if (cmd != IPP_CMD_M2M) {
+		DRM_ERROR("not support cmd: %d\n", cmd);
+		return -EINVAL;
+	}
+
+	/* Set interrupt enable */
+	rotator_reg_set_irq(rot, true);
+
+	val = rot_read(ROT_CONTROL);
+	val |= ROT_CONTROL_START;
+
+	rot_write(val, ROT_CONTROL);
+
+	return 0;
+}
+
+static int __devinit rotator_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rot_context *rot;
+	struct exynos_drm_ippdrv *ippdrv;
+	int ret;
+
+	rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
+	if (!rot) {
+		dev_err(dev, "failed to allocate rot\n");
+		return -ENOMEM;
+	}
+
+	rot->limit_tbl = (struct rot_limit_table *)
+				platform_get_device_id(pdev)->driver_data;
+
+	rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!rot->regs_res) {
+		dev_err(dev, "failed to find registers\n");
+		ret = -ENOENT;
+		goto err_get_resource;
+	}
+
+	rot->regs = devm_request_and_ioremap(dev, rot->regs_res);
+	if (!rot->regs) {
+		dev_err(dev, "failed to map register\n");
+		ret = -ENXIO;
+		goto err_get_resource;
+	}
+
+	rot->irq = platform_get_irq(pdev, 0);
+	if (rot->irq < 0) {
+		dev_err(dev, "failed to get irq\n");
+		ret = rot->irq;
+		goto err_get_irq;
+	}
+
+	ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler,
+			IRQF_ONESHOT, "drm_rotator", rot);
+	if (ret < 0) {
+		dev_err(dev, "failed to request irq\n");
+		goto err_get_irq;
+	}
+
+	rot->clock = clk_get(dev, "rotator");
+	if (IS_ERR_OR_NULL(rot->clock)) {
+		dev_err(dev, "failed to get clock\n");
+		ret = PTR_ERR(rot->clock);
+		goto err_clk_get;
+	}
+
+	pm_runtime_enable(dev);
+
+	ippdrv = &rot->ippdrv;
+	ippdrv->dev = dev;
+	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
+	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
+	ippdrv->check_property = rotator_ippdrv_check_property;
+	ippdrv->start = rotator_ippdrv_start;
+	ret = rotator_init_prop_list(ippdrv);
+	if (ret < 0) {
+		dev_err(dev, "failed to init property list.\n");
+		goto err_ippdrv_register;
+	}
+
+	DRM_DEBUG_KMS("%s:ippdrv[0x%x]\n", __func__, (int)ippdrv);
+
+	platform_set_drvdata(pdev, rot);
+
+	ret = exynos_drm_ippdrv_register(ippdrv);
+	if (ret < 0) {
+		dev_err(dev, "failed to register drm rotator device\n");
+		goto err_ippdrv_register;
+	}
+
+	dev_info(dev, "The exynos rotator is probed successfully\n");
+
+	return 0;
+
+err_ippdrv_register:
+	devm_kfree(dev, ippdrv->prop_list);
+	pm_runtime_disable(dev);
+	clk_put(rot->clock);
+err_clk_get:
+	free_irq(rot->irq, rot);
+err_get_irq:
+	devm_iounmap(dev, rot->regs);
+err_get_resource:
+	devm_kfree(dev, rot);
+	return ret;
+}
+
+static int __devexit rotator_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rot_context *rot = dev_get_drvdata(dev);
+	struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+
+	devm_kfree(dev, ippdrv->prop_list);
+	exynos_drm_ippdrv_unregister(ippdrv);
+
+	pm_runtime_disable(dev);
+	clk_put(rot->clock);
+
+	free_irq(rot->irq, rot);
+	devm_iounmap(dev, rot->regs);
+
+	devm_kfree(dev, rot);
+
+	return 0;
+}
+
+struct rot_limit_table rot_limit_tbl = {
+	.ycbcr420_2p = {
+		.min_w = 32,
+		.min_h = 32,
+		.max_w = SZ_32K,
+		.max_h = SZ_32K,
+		.align = 3,
+	},
+	.rgb888 = {
+		.min_w = 8,
+		.min_h = 8,
+		.max_w = SZ_8K,
+		.max_h = SZ_8K,
+		.align = 2,
+	},
+};
+
+struct platform_device_id rotator_driver_ids[] = {
+	{
+		.name		= "exynos-rot",
+		.driver_data	= (unsigned long)&rot_limit_tbl,
+	},
+	{},
+};
+
+static int rotator_clk_ctrl(struct rot_context *rot, bool enable)
+{
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (enable) {
+		clk_enable(rot->clock);
+		rot->suspended = false;
+	} else {
+		clk_disable(rot->clock);
+		rot->suspended = true;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rotator_suspend(struct device *dev)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	return rotator_clk_ctrl(rot, false);
+}
+
+static int rotator_resume(struct device *dev)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!pm_runtime_suspended(dev))
+		return rotator_clk_ctrl(rot, true);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int rotator_runtime_suspend(struct device *dev)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	return rotator_clk_ctrl(rot, false);
+}
+
+static int rotator_runtime_resume(struct device *dev)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	return rotator_clk_ctrl(rot, true);
+}
+#endif
+
+static const struct dev_pm_ops rotator_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
+	SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
+									NULL)
+};
+
+struct platform_driver rotator_driver = {
+	.probe		= rotator_probe,
+	.remove		= __devexit_p(rotator_remove),
+	.id_table	= rotator_driver_ids,
+	.driver		= {
+		.name	= "exynos-rot",
+		.owner	= THIS_MODULE,
+		.pm	= &rotator_pm_ops,
+	},
+};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
new file mode 100644
index 000000000000..a2d7a14a52b6
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *	YoungJun Cho <yj44.cho@samsung.com>
+ *	Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef	_EXYNOS_DRM_ROTATOR_H_
+#define	_EXYNOS_DRM_ROTATOR_H_
+
+/* TODO */
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index e4b8a8f741f7..99bfc38dfaa2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -39,7 +39,6 @@ struct vidi_win_data {
 	unsigned int		fb_height;
 	unsigned int		bpp;
 	dma_addr_t		dma_addr;
-	void __iomem		*vaddr;
 	unsigned int		buf_offsize;
 	unsigned int		line_size;	/* bytes */
 	bool			enabled;
@@ -294,7 +293,6 @@ static void vidi_win_mode_set(struct device *dev,
 	win_data->fb_width = overlay->fb_width;
 	win_data->fb_height = overlay->fb_height;
 	win_data->dma_addr = overlay->dma_addr[0] + offset;
-	win_data->vaddr = overlay->vaddr[0] + offset;
 	win_data->bpp = overlay->bpp;
 	win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
 				(overlay->bpp >> 3);
@@ -309,9 +307,7 @@ static void vidi_win_mode_set(struct device *dev,
 			win_data->offset_x, win_data->offset_y);
 	DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
 			win_data->ovl_width, win_data->ovl_height);
-	DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
-			(unsigned long)win_data->dma_addr,
-			(unsigned long)win_data->vaddr);
+	DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
 	DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
 			overlay->fb_width, overlay->crtc_width);
 }
@@ -382,7 +378,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
 	struct drm_pending_vblank_event *e, *t;
 	struct timeval now;
 	unsigned long flags;
-	bool is_checked = false;
 
 	spin_lock_irqsave(&drm_dev->event_lock, flags);
 
@@ -392,8 +387,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
 		if (crtc != e->pipe)
 			continue;
 
-		is_checked = true;
-
 		do_gettimeofday(&now);
 		e->event.sequence = 0;
 		e->event.tv_sec = now.tv_sec;
@@ -401,22 +394,7 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
 
 		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
 		wake_up_interruptible(&e->base.file_priv->event_wait);
-	}
-
-	if (is_checked) {
-		/*
-		 * call drm_vblank_put only in case that drm_vblank_get was
-		 * called.
-		 */
-		if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
-			drm_vblank_put(drm_dev, crtc);
-
-		/*
-		 * don't off vblank if vblank_disable_allowed is 1,
-		 * because vblank would be off by timer handler.
-		 */
-		if (!drm_dev->vblank_disable_allowed)
-			drm_vblank_off(drm_dev, crtc);
+		drm_vblank_put(drm_dev, crtc);
 	}
 
 	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2c115f8a62a3..2c46b6c0b82c 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -50,6 +50,29 @@
 #define MAX_HEIGHT		1080
 #define get_hdmi_context(dev)	platform_get_drvdata(to_platform_device(dev))
 
+/* AVI header and aspect ratio */
+#define HDMI_AVI_VERSION		0x02
+#define HDMI_AVI_LENGTH		0x0D
+#define AVI_PIC_ASPECT_RATIO_16_9	(2 << 4)
+#define AVI_SAME_AS_PIC_ASPECT_RATIO	8
+
+/* AUI header info */
+#define HDMI_AUI_VERSION	0x01
+#define HDMI_AUI_LENGTH	0x0A
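+/* payload lengths follow CEA-861: AVI infoframe 13 bytes, AUI 10 bytes */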
+
+/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
+enum HDMI_PACKET_TYPE {
+	/* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
+	/* InfoFrame packet type */
+	HDMI_PACKET_TYPE_INFOFRAME = 0x80,
+	/* Vendor-Specific InfoFrame */
+	HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
+	/* Auxiliary Video information InfoFrame */
+	HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
+	/* Audio information InfoFrame */
+	HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
+};
+
 enum hdmi_type {
 	HDMI_TYPE13,
 	HDMI_TYPE14,
@@ -74,6 +97,7 @@ struct hdmi_context {
 	struct mutex			hdmi_mutex;
 
 	void __iomem			*regs;
+	void				*parent_ctx;
 	int				external_irq;
 	int				internal_irq;
 
@@ -84,7 +108,6 @@ struct hdmi_context {
 	int cur_conf;
 
 	struct hdmi_resources		res;
-	void				*parent_ctx;
 
 	int				hpd_gpio;
 
@@ -182,6 +205,7 @@ struct hdmi_v13_conf {
 	int height;
 	int vrefresh;
 	bool interlace;
+	int cea_video_id;
 	const u8 *hdmiphy_data;
 	const struct hdmi_v13_preset_conf *conf;
 };
@@ -353,15 +377,20 @@ static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = {
 };
 
 static const struct hdmi_v13_conf hdmi_v13_confs[] = {
-	{ 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
-	{ 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
-	{ 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p },
-	{ 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 },
-	{ 1920, 1080, 50, false, hdmiphy_v13_conf148_5,
-				 &hdmi_v13_conf_1080p50 },
-	{ 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 },
-	{ 1920, 1080, 60, false, hdmiphy_v13_conf148_5,
-				 &hdmi_v13_conf_1080p60 },
+	{ 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25,
+			&hdmi_v13_conf_720p60 },
+	{ 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25,
+			&hdmi_v13_conf_720p60 },
+	{ 720, 480, 60, false, 3, hdmiphy_v13_conf27_027,
+			&hdmi_v13_conf_480p },
+	{ 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25,
+			&hdmi_v13_conf_1080i50 },
+	{ 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5,
+			&hdmi_v13_conf_1080p50 },
+	{ 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25,
+			&hdmi_v13_conf_1080i60 },
+	{ 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5,
+			&hdmi_v13_conf_1080p60 },
 };
 
 /* HDMI Version 1.4 */
@@ -479,6 +508,7 @@ struct hdmi_conf {
 	int height;
 	int vrefresh;
 	bool interlace;
+	int cea_video_id;
 	const u8 *hdmiphy_data;
 	const struct hdmi_preset_conf *conf;
 };
@@ -934,16 +964,21 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
 };
 
 static const struct hdmi_conf hdmi_confs[] = {
-	{ 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 },
-	{ 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 },
-	{ 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
-	{ 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
-	{ 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
-	{ 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
-	{ 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
-	{ 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
+	{ 720, 480, 60, false, 3, hdmiphy_conf27_027, &hdmi_conf_480p60 },
+	{ 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 },
+	{ 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+	{ 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
+	{ 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
+	{ 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
+	{ 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
+	{ 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
 };
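+
+/*
+ * A structure of HDMI infoframe header.
+ *
+ * @type: infoframe packet type (AVI or AUI).
+ * @ver: infoframe version.
+ * @len: payload length in bytes.
+ */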
 
+struct hdmi_infoframe {
+	enum HDMI_PACKET_TYPE type;
+	u8 ver;
+	u8 len;
+};
 
 static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
 {
@@ -1267,6 +1302,88 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
 	return hdmi_v14_conf_index(mode);
 }
 
+static u8 hdmi_chksum(struct hdmi_context *hdata,
+			u32 start, u8 len, u32 hdr_sum)
+{
+	int i;
+
+	/*
+	 * hdr_sum: header0 + header1 + header2
+	 * start: start address of packet byte1
+	 * len: packet bytes - 1
+	 */
+	for (i = 0; i < len; ++i)
+		hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4);
+
+	/* return 2's complement of 8 bit hdr_sum */
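+	/* e.g. low byte 0x34 -> checksum 0xcc, so (sum + chksum) & 0xff == 0 */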
+	return (u8)(~(hdr_sum & 0xff) + 1);
+}
+
+static void hdmi_reg_infoframe(struct hdmi_context *hdata,
+			struct hdmi_infoframe *infoframe)
+{
+	u32 hdr_sum;
+	u8 chksum;
+	u32 aspect_ratio;
+	u32 mod;
+	u32 vic;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
+	if (hdata->dvi_mode) {
+		hdmi_reg_writeb(hdata, HDMI_VSI_CON,
+				HDMI_VSI_CON_DO_NOT_TRANSMIT);
+		hdmi_reg_writeb(hdata, HDMI_AVI_CON,
+				HDMI_AVI_CON_DO_NOT_TRANSMIT);
+		hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
+		return;
+	}
+
+	switch (infoframe->type) {
+	case HDMI_PACKET_TYPE_AVI:
+		hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
+		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
+		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
+		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
+		hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+
+		/* Output format hardcoded to zero (RGB/YCbCr selection) */
+		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
+			AVI_ACTIVE_FORMAT_VALID |
+			AVI_UNDERSCANNED_DISPLAY_VALID);
+
+		aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9;
+
+		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
+				AVI_SAME_AS_PIC_ASPECT_RATIO);
+
+		if (hdata->type == HDMI_TYPE13)
+			vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
+		else
+			vic = hdmi_confs[hdata->cur_conf].cea_video_id;
+
+		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
+
+		chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
+					infoframe->len, hdr_sum);
+		DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
+		hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
+		break;
+	case HDMI_PACKET_TYPE_AUI:
+		hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
+		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
+		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
+		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
+		hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+		chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
+					infoframe->len, hdr_sum);
+		DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
+		hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
+		break;
+	default:
+		break;
+	}
+}
+
 static bool hdmi_is_connected(void *ctx)
 {
 	struct hdmi_context *hdata = ctx;
@@ -1293,6 +1410,7 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
 		DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
 			(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
 			raw_edid->width_cm, raw_edid->height_cm);
+		kfree(raw_edid);
 	} else {
 		return -ENODEV;
 	}
@@ -1541,6 +1659,8 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
 
 static void hdmi_conf_init(struct hdmi_context *hdata)
 {
+	struct hdmi_infoframe infoframe;
+
 	/* disable HPD interrupts */
 	hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
 		HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1575,9 +1695,17 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
 		hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
 		hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
 	} else {
+		infoframe.type = HDMI_PACKET_TYPE_AVI;
+		infoframe.ver = HDMI_AVI_VERSION;
+		infoframe.len = HDMI_AVI_LENGTH;
+		hdmi_reg_infoframe(hdata, &infoframe);
+
+		infoframe.type = HDMI_PACKET_TYPE_AUI;
+		infoframe.ver = HDMI_AUI_VERSION;
+		infoframe.len = HDMI_AUI_LENGTH;
+		hdmi_reg_infoframe(hdata, &infoframe);
+
 		/* enable AVI packet every vsync, fixes purple line problem */
-		hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
-		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
 		hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
 	}
 }
@@ -1875,6 +2003,24 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
 	mdelay(10);
 }
 
+static void hdmiphy_poweron(struct hdmi_context *hdata)
+{
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	if (hdata->type == HDMI_TYPE14)
+		hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0,
+			HDMI_PHY_POWER_OFF_EN);
+}
+
+static void hdmiphy_poweroff(struct hdmi_context *hdata)
+{
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	if (hdata->type == HDMI_TYPE14)
+		hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0,
+			HDMI_PHY_POWER_OFF_EN);
+}
+
 static void hdmiphy_conf_apply(struct hdmi_context *hdata)
 {
 	const u8 *hdmiphy_data;
@@ -1978,9 +2124,18 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
 			index = hdmi_v14_conf_index(m);
 
 		if (index >= 0) {
+			struct drm_mode_object base;
+			struct list_head head;
+
 			DRM_INFO("desired mode doesn't exist so\n");
 			DRM_INFO("use the most suitable mode among modes.\n");
+
+			/* preserve display mode header while copying. */
+			head = adjusted_mode->head;
+			base = adjusted_mode->base;
 			memcpy(adjusted_mode, m, sizeof(*m));
+			adjusted_mode->head = head;
+			adjusted_mode->base = base;
 			break;
 		}
 	}
@@ -2034,12 +2189,12 @@ static void hdmi_poweron(struct hdmi_context *hdata)
 
 	mutex_unlock(&hdata->hdmi_mutex);
 
-	pm_runtime_get_sync(hdata->dev);
-
 	regulator_bulk_enable(res->regul_count, res->regul_bulk);
 	clk_enable(res->hdmiphy);
 	clk_enable(res->hdmi);
 	clk_enable(res->sclk_hdmi);
+
+	hdmiphy_poweron(hdata);
 }
 
 static void hdmi_poweroff(struct hdmi_context *hdata)
@@ -2058,14 +2213,13 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
 	 * its reset state seems to meet the condition.
 	 */
 	hdmiphy_conf_reset(hdata);
+	hdmiphy_poweroff(hdata);
 
 	clk_disable(res->sclk_hdmi);
 	clk_disable(res->hdmi);
 	clk_disable(res->hdmiphy);
 	regulator_bulk_disable(res->regul_count, res->regul_bulk);
 
-	pm_runtime_put_sync(hdata->dev);
-
 	mutex_lock(&hdata->hdmi_mutex);
 
 	hdata->powered = false;
@@ -2078,16 +2232,18 @@ static void hdmi_dpms(void *ctx, int mode)
 {
 	struct hdmi_context *hdata = ctx;
 
-	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+	DRM_DEBUG_KMS("[%d] %s mode %d\n", __LINE__, __func__, mode);
 
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
-		hdmi_poweron(hdata);
+		if (pm_runtime_suspended(hdata->dev))
+			pm_runtime_get_sync(hdata->dev);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		hdmi_poweroff(hdata);
+		if (!pm_runtime_suspended(hdata->dev))
+			pm_runtime_put_sync(hdata->dev);
 		break;
 	default:
 		DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
@@ -2166,27 +2322,27 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
 	memset(res, 0, sizeof(*res));
 
 	/* get clocks, power */
-	res->hdmi = clk_get(dev, "hdmi");
+	res->hdmi = devm_clk_get(dev, "hdmi");
 	if (IS_ERR_OR_NULL(res->hdmi)) {
 		DRM_ERROR("failed to get clock 'hdmi'\n");
 		goto fail;
 	}
-	res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+	res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
 	if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
 		DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
 		goto fail;
 	}
-	res->sclk_pixel = clk_get(dev, "sclk_pixel");
+	res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
 	if (IS_ERR_OR_NULL(res->sclk_pixel)) {
 		DRM_ERROR("failed to get clock 'sclk_pixel'\n");
 		goto fail;
 	}
-	res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
+	res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
 	if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
 		DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
 		goto fail;
 	}
-	res->hdmiphy = clk_get(dev, "hdmiphy");
+	res->hdmiphy = devm_clk_get(dev, "hdmiphy");
 	if (IS_ERR_OR_NULL(res->hdmiphy)) {
 		DRM_ERROR("failed to get clock 'hdmiphy'\n");
 		goto fail;
@@ -2194,7 +2350,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
 
 	clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
 
-	res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
+	res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
 		sizeof(res->regul_bulk[0]), GFP_KERNEL);
 	if (!res->regul_bulk) {
 		DRM_ERROR("failed to get memory for regulators\n");
@@ -2204,7 +2360,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
 		res->regul_bulk[i].supply = supply[i];
 		res->regul_bulk[i].consumer = NULL;
 	}
-	ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
 	if (ret) {
 		DRM_ERROR("failed to get regulators\n");
 		goto fail;
@@ -2217,28 +2373,6 @@ fail:
 	return -ENODEV;
 }
 
-static int hdmi_resources_cleanup(struct hdmi_context *hdata)
-{
-	struct hdmi_resources *res = &hdata->res;
-
-	regulator_bulk_free(res->regul_count, res->regul_bulk);
-	/* kfree is NULL-safe */
-	kfree(res->regul_bulk);
-	if (!IS_ERR_OR_NULL(res->hdmiphy))
-		clk_put(res->hdmiphy);
-	if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
-		clk_put(res->sclk_hdmiphy);
-	if (!IS_ERR_OR_NULL(res->sclk_pixel))
-		clk_put(res->sclk_pixel);
-	if (!IS_ERR_OR_NULL(res->sclk_hdmi))
-		clk_put(res->sclk_hdmi);
-	if (!IS_ERR_OR_NULL(res->hdmi))
-		clk_put(res->hdmi);
-	memset(res, 0, sizeof(*res));
-
-	return 0;
-}
-
 static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
 
 void hdmi_attach_ddc_client(struct i2c_client *ddc)
@@ -2306,6 +2440,7 @@ static struct platform_device_id hdmi_driver_types[] = {
 	}
 };
 
+#ifdef CONFIG_OF
 static struct of_device_id hdmi_match_types[] = {
 	{
 		.compatible = "samsung,exynos5-hdmi",
@@ -2314,6 +2449,7 @@ static struct of_device_id hdmi_match_types[] = {
 		/* end node */
 	}
 };
+#endif
 
 static int __devinit hdmi_probe(struct platform_device *pdev)
 {
@@ -2366,6 +2502,8 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
 		const struct of_device_id *match;
 		match = of_match_node(of_match_ptr(hdmi_match_types),
 					pdev->dev.of_node);
+		if (match == NULL)
+			return -ENODEV;
 		hdata->type = (enum hdmi_type)match->data;
 	} else {
 		hdata->type = (enum hdmi_type)platform_get_device_id
@@ -2378,36 +2516,32 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
 	ret = hdmi_resources_init(hdata);
 
 	if (ret) {
-		ret = -EINVAL;
 		DRM_ERROR("hdmi_resources_init failed\n");
-		goto err_data;
+		return -EINVAL;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
 		DRM_ERROR("failed to find registers\n");
-		ret = -ENOENT;
-		goto err_resource;
+		return -ENOENT;
 	}
 
 	hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
 	if (!hdata->regs) {
 		DRM_ERROR("failed to map registers\n");
-		ret = -ENXIO;
-		goto err_resource;
+		return -ENXIO;
 	}
 
-	ret = gpio_request(hdata->hpd_gpio, "HPD");
+	ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD");
 	if (ret) {
 		DRM_ERROR("failed to request HPD gpio\n");
-		goto err_resource;
+		return ret;
 	}
 
 	/* DDC i2c driver */
 	if (i2c_add_driver(&ddc_driver)) {
 		DRM_ERROR("failed to register ddc i2c driver\n");
-		ret = -ENOENT;
-		goto err_gpio;
+		return -ENOENT;
 	}
 
 	hdata->ddc_port = hdmi_ddc;
@@ -2470,11 +2604,6 @@ err_hdmiphy:
 	i2c_del_driver(&hdmiphy_driver);
 err_ddc:
 	i2c_del_driver(&ddc_driver);
-err_gpio:
-	gpio_free(hdata->hpd_gpio);
-err_resource:
-	hdmi_resources_cleanup(hdata);
-err_data:
 	return ret;
 }
 
@@ -2491,9 +2620,6 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
 	free_irq(hdata->internal_irq, hdata);
 	free_irq(hdata->external_irq, hdata);
 
-	gpio_free(hdata->hpd_gpio);
-
-	hdmi_resources_cleanup(hdata);
 
 	/* hdmiphy i2c driver */
 	i2c_del_driver(&hdmiphy_driver);
@@ -2509,6 +2635,8 @@ static int hdmi_suspend(struct device *dev)
 	struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
 	struct hdmi_context *hdata = ctx->ctx;
 
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
 	disable_irq(hdata->internal_irq);
 	disable_irq(hdata->external_irq);
 
@@ -2516,6 +2644,11 @@ static int hdmi_suspend(struct device *dev)
 	if (ctx->drm_dev)
 		drm_helper_hpd_irq_event(ctx->drm_dev);
 
+	if (pm_runtime_suspended(dev)) {
+		DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+		return 0;
+	}
+
 	hdmi_poweroff(hdata);
 
 	return 0;
@@ -2526,13 +2659,52 @@ static int hdmi_resume(struct device *dev)
 	struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
 	struct hdmi_context *hdata = ctx->ctx;
 
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+
 	enable_irq(hdata->external_irq);
 	enable_irq(hdata->internal_irq);
+
+	if (!pm_runtime_suspended(dev)) {
+		DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+		return 0;
+	}
+
+	hdmi_poweron(hdata);
+
 	return 0;
 }
 #endif
 
-static SIMPLE_DEV_PM_OPS(hdmi_pm_ops, hdmi_suspend, hdmi_resume);
+#ifdef CONFIG_PM_RUNTIME
+static int hdmi_runtime_suspend(struct device *dev)
+{
+	struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+	struct hdmi_context *hdata = ctx->ctx;
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	hdmi_poweroff(hdata);
+
+	return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+	struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+	struct hdmi_context *hdata = ctx->ctx;
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	hdmi_poweron(hdata);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(hdmi_suspend, hdmi_resume)
+	SET_RUNTIME_PM_OPS(hdmi_runtime_suspend, hdmi_runtime_resume, NULL)
+};
 
 struct platform_driver hdmi_driver = {
 	.probe		= hdmi_probe,
@@ -2542,6 +2714,6 @@ struct platform_driver hdmi_driver = {
 		.name	= "exynos-hdmi",
 		.owner	= THIS_MODULE,
 		.pm	= &hdmi_pm_ops,
-		.of_match_table = hdmi_match_types,
+		.of_match_table = of_match_ptr(hdmi_match_types),
 	},
 };
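
The hunk above replaces SIMPLE_DEV_PM_OPS() with a dev_pm_ops that wires up both the system-sleep and the runtime PM callbacks, and has the system-sleep paths check pm_runtime_suspended() so the device is never powered down or up twice. A minimal sketch of that pattern for a hypothetical driver (foo_power_on()/foo_power_off() are assumed stand-ins for the driver's real power helpers; only the dev_pm_ops wiring mirrors the patch, and the callbacks would normally sit under the same CONFIG_PM_SLEEP/CONFIG_PM_RUNTIME guards used above):

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_power_on(struct device *dev);	/* assumed helper */
static int foo_power_off(struct device *dev);	/* assumed helper */

static int foo_suspend(struct device *dev)
{
	/* Runtime PM may already have powered the device down. */
	if (pm_runtime_suspended(dev))
		return 0;
	return foo_power_off(dev);
}

static int foo_resume(struct device *dev)
{
	/* Power up only if runtime PM left the device suspended. */
	if (!pm_runtime_suspended(dev))
		return 0;
	return foo_power_on(dev);
}

static int foo_runtime_suspend(struct device *dev)
{
	return foo_power_off(dev);
}

static int foo_runtime_resume(struct device *dev)
{
	return foo_power_on(dev);
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};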
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index 27d1720f1bbd..6206056f4a33 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -46,6 +46,7 @@ static const struct i2c_device_id hdmiphy_id[] = {
 	{ },
 };
 
+#ifdef CONFIG_OF
 static struct of_device_id hdmiphy_match_types[] = {
 	{
 		.compatible = "samsung,exynos5-hdmiphy",
@@ -53,12 +54,13 @@ static struct of_device_id hdmiphy_match_types[] = {
 		/* end node */
 	}
 };
+#endif
 
 struct i2c_driver hdmiphy_driver = {
 	.driver = {
 		.name	= "exynos-hdmiphy",
 		.owner	= THIS_MODULE,
-		.of_match_table = hdmiphy_match_types,
+		.of_match_table = of_match_ptr(hdmiphy_match_types),
 	},
 	.id_table = hdmiphy_id,
 	.probe		= hdmiphy_probe,
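
This file and exynos_hdmi.c above use the same device-tree idiom: the of_device_id table is built only under CONFIG_OF, and of_match_ptr() (from <linux/of.h>) expands to the table when CONFIG_OF is set and to NULL otherwise, so the driver structure needs no second #ifdef. An illustrative sketch with a made-up "bar" i2c device:

#include <linux/i2c.h>
#include <linux/of.h>

#ifdef CONFIG_OF
static const struct of_device_id bar_match[] = {
	{ .compatible = "vendor,bar" },
	{ /* sentinel */ }
};
#endif

static struct i2c_driver bar_driver = {
	.driver = {
		.name = "bar",
		/* NULL when CONFIG_OF is disabled */
		.of_match_table = of_match_ptr(bar_match),
	},
};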
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e7fbb823fd8e..21db89530fc7 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -36,14 +36,13 @@
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_hdmi.h"
+#include "exynos_drm_iommu.h"
 
 #define get_mixer_context(dev)	platform_get_drvdata(to_platform_device(dev))
 
 struct hdmi_win_data {
 	dma_addr_t		dma_addr;
-	void __iomem		*vaddr;
 	dma_addr_t		chroma_dma_addr;
-	void __iomem		*chroma_vaddr;
 	uint32_t		pixel_format;
 	unsigned int		bpp;
 	unsigned int		crtc_x;
@@ -59,6 +58,8 @@ struct hdmi_win_data {
 	unsigned int		mode_width;
 	unsigned int		mode_height;
 	unsigned int		scan_flags;
+	bool			enabled;
+	bool			resume;
 };
 
 struct mixer_resources {
@@ -80,6 +81,7 @@ enum mixer_version_id {
 
 struct mixer_context {
 	struct device		*dev;
+	struct drm_device	*drm_dev;
 	int			pipe;
 	bool			interlace;
 	bool			powered;
@@ -90,6 +92,9 @@ struct mixer_context {
 	struct mixer_resources	mixer_res;
 	struct hdmi_win_data	win_data[MIXER_WIN_NR];
 	enum mixer_version_id	mxr_ver;
+	void			*parent_ctx;
+	wait_queue_head_t	wait_vsync_queue;
+	atomic_t		wait_vsync_event;
 };
 
 struct mixer_drv_data {
@@ -665,58 +670,22 @@ static void mixer_win_reset(struct mixer_context *ctx)
 	spin_unlock_irqrestore(&res->reg_slock, flags);
 }
 
-static void mixer_poweron(struct mixer_context *ctx)
+static int mixer_iommu_on(void *ctx, bool enable)
 {
-	struct mixer_resources *res = &ctx->mixer_res;
+	struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+	struct mixer_context *mdata = ctx;
+	struct drm_device *drm_dev;
 
-	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+	drm_hdmi_ctx = mdata->parent_ctx;
+	drm_dev = drm_hdmi_ctx->drm_dev;
 
-	mutex_lock(&ctx->mixer_mutex);
-	if (ctx->powered) {
-		mutex_unlock(&ctx->mixer_mutex);
-		return;
+	if (is_drm_iommu_supported(drm_dev)) {
+		if (enable)
+			return drm_iommu_attach_device(drm_dev, mdata->dev);
+
+		drm_iommu_detach_device(drm_dev, mdata->dev);
 	}
-	ctx->powered = true;
-	mutex_unlock(&ctx->mixer_mutex);
-
-	pm_runtime_get_sync(ctx->dev);
-
-	clk_enable(res->mixer);
-	if (ctx->vp_enabled) {
-		clk_enable(res->vp);
-		clk_enable(res->sclk_mixer);
-	}
-
-	mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
-	mixer_win_reset(ctx);
-}
-
-static void mixer_poweroff(struct mixer_context *ctx)
-{
-	struct mixer_resources *res = &ctx->mixer_res;
-
-	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
-	mutex_lock(&ctx->mixer_mutex);
-	if (!ctx->powered)
-		goto out;
-	mutex_unlock(&ctx->mixer_mutex);
-
-	ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
-
-	clk_disable(res->mixer);
-	if (ctx->vp_enabled) {
-		clk_disable(res->vp);
-		clk_disable(res->sclk_mixer);
-	}
-
-	pm_runtime_put_sync(ctx->dev);
-
-	mutex_lock(&ctx->mixer_mutex);
-	ctx->powered = false;
-
-out:
-	mutex_unlock(&ctx->mixer_mutex);
+	return 0;
 }
 
 static int mixer_enable_vblank(void *ctx, int pipe)
@@ -746,39 +715,6 @@ static void mixer_disable_vblank(void *ctx)
 	mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
 }
 
-static void mixer_dpms(void *ctx, int mode)
-{
-	struct mixer_context *mixer_ctx = ctx;
-
-	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-		mixer_poweron(mixer_ctx);
-		break;
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-	case DRM_MODE_DPMS_OFF:
-		mixer_poweroff(mixer_ctx);
-		break;
-	default:
-		DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
-		break;
-	}
-}
-
-static void mixer_wait_for_vblank(void *ctx)
-{
-	struct mixer_context *mixer_ctx = ctx;
-	struct mixer_resources *res = &mixer_ctx->mixer_res;
-	int ret;
-
-	ret = wait_for((mixer_reg_read(res, MXR_INT_STATUS) &
-				MXR_INT_STATUS_VSYNC), 50);
-	if (ret < 0)
-		DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
 static void mixer_win_mode_set(void *ctx,
 			      struct exynos_drm_overlay *overlay)
 {
@@ -811,9 +747,7 @@ static void mixer_win_mode_set(void *ctx,
 	win_data = &mixer_ctx->win_data[win];
 
 	win_data->dma_addr = overlay->dma_addr[0];
-	win_data->vaddr = overlay->vaddr[0];
 	win_data->chroma_dma_addr = overlay->dma_addr[1];
-	win_data->chroma_vaddr = overlay->vaddr[1];
 	win_data->pixel_format = overlay->pixel_format;
 	win_data->bpp = overlay->bpp;
 
@@ -845,6 +779,8 @@ static void mixer_win_commit(void *ctx, int win)
 		vp_video_buffer(mixer_ctx, win);
 	else
 		mixer_graph_buffer(mixer_ctx, win);
+
+	mixer_ctx->win_data[win].enabled = true;
 }
 
 static void mixer_win_disable(void *ctx, int win)
@@ -855,6 +791,14 @@ static void mixer_win_disable(void *ctx, int win)
 
 	DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
 
+	mutex_lock(&mixer_ctx->mixer_mutex);
+	if (!mixer_ctx->powered) {
+		mutex_unlock(&mixer_ctx->mixer_mutex);
+		mixer_ctx->win_data[win].resume = false;
+		return;
+	}
+	mutex_unlock(&mixer_ctx->mixer_mutex);
+
 	spin_lock_irqsave(&res->reg_slock, flags);
 	mixer_vsync_set_update(mixer_ctx, false);
 
@@ -862,16 +806,144 @@ static void mixer_win_disable(void *ctx, int win)
 
 	mixer_vsync_set_update(mixer_ctx, true);
 	spin_unlock_irqrestore(&res->reg_slock, flags);
+
+	mixer_ctx->win_data[win].enabled = false;
+}
+
+static void mixer_wait_for_vblank(void *ctx)
+{
+	struct mixer_context *mixer_ctx = ctx;
+
+	mutex_lock(&mixer_ctx->mixer_mutex);
+	if (!mixer_ctx->powered) {
+		mutex_unlock(&mixer_ctx->mixer_mutex);
+		return;
+	}
+	mutex_unlock(&mixer_ctx->mixer_mutex);
+
+	atomic_set(&mixer_ctx->wait_vsync_event, 1);
+
+	/*
+	 * Wait for the mixer to signal the VSYNC interrupt, or give up
+	 * after a 50 ms timeout (one frame period at a 20 Hz refresh).
+	 */
+	if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
+				!atomic_read(&mixer_ctx->wait_vsync_event),
+				DRM_HZ/20))
+		DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
+static void mixer_window_suspend(struct mixer_context *ctx)
+{
+	struct hdmi_win_data *win_data;
+	int i;
+
+	for (i = 0; i < MIXER_WIN_NR; i++) {
+		win_data = &ctx->win_data[i];
+		win_data->resume = win_data->enabled;
+		mixer_win_disable(ctx, i);
+	}
+	mixer_wait_for_vblank(ctx);
+}
+
+static void mixer_window_resume(struct mixer_context *ctx)
+{
+	struct hdmi_win_data *win_data;
+	int i;
+
+	for (i = 0; i < MIXER_WIN_NR; i++) {
+		win_data = &ctx->win_data[i];
+		win_data->enabled = win_data->resume;
+		win_data->resume = false;
+	}
+}
+
+static void mixer_poweron(struct mixer_context *ctx)
+{
+	struct mixer_resources *res = &ctx->mixer_res;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	mutex_lock(&ctx->mixer_mutex);
+	if (ctx->powered) {
+		mutex_unlock(&ctx->mixer_mutex);
+		return;
+	}
+	ctx->powered = true;
+	mutex_unlock(&ctx->mixer_mutex);
+
+	clk_enable(res->mixer);
+	if (ctx->vp_enabled) {
+		clk_enable(res->vp);
+		clk_enable(res->sclk_mixer);
+	}
+
+	mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
+	mixer_win_reset(ctx);
+
+	mixer_window_resume(ctx);
+}
+
+static void mixer_poweroff(struct mixer_context *ctx)
+{
+	struct mixer_resources *res = &ctx->mixer_res;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	mutex_lock(&ctx->mixer_mutex);
+	if (!ctx->powered)
+		goto out;
+	mutex_unlock(&ctx->mixer_mutex);
+
+	mixer_window_suspend(ctx);
+
+	ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+
+	clk_disable(res->mixer);
+	if (ctx->vp_enabled) {
+		clk_disable(res->vp);
+		clk_disable(res->sclk_mixer);
+	}
+
+	mutex_lock(&ctx->mixer_mutex);
+	ctx->powered = false;
+
+out:
+	mutex_unlock(&ctx->mixer_mutex);
+}
+
+static void mixer_dpms(void *ctx, int mode)
+{
+	struct mixer_context *mixer_ctx = ctx;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		if (pm_runtime_suspended(mixer_ctx->dev))
+			pm_runtime_get_sync(mixer_ctx->dev);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		if (!pm_runtime_suspended(mixer_ctx->dev))
+			pm_runtime_put_sync(mixer_ctx->dev);
+		break;
+	default:
+		DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+		break;
+	}
 }
 
 static struct exynos_mixer_ops mixer_ops = {
 	/* manager */
+	.iommu_on		= mixer_iommu_on,
 	.enable_vblank		= mixer_enable_vblank,
 	.disable_vblank		= mixer_disable_vblank,
+	.wait_for_vblank	= mixer_wait_for_vblank,
 	.dpms			= mixer_dpms,
 
 	/* overlay */
-	.wait_for_vblank	= mixer_wait_for_vblank,
 	.win_mode_set		= mixer_win_mode_set,
 	.win_commit		= mixer_win_commit,
 	.win_disable		= mixer_win_disable,
@@ -884,7 +956,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
 	struct drm_pending_vblank_event *e, *t;
 	struct timeval now;
 	unsigned long flags;
-	bool is_checked = false;
 
 	spin_lock_irqsave(&drm_dev->event_lock, flags);
 
@@ -894,7 +965,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
 		if (crtc != e->pipe)
 			continue;
 
-		is_checked = true;
 		do_gettimeofday(&now);
 		e->event.sequence = 0;
 		e->event.tv_sec = now.tv_sec;
@@ -902,16 +972,9 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
 
 		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
 		wake_up_interruptible(&e->base.file_priv->event_wait);
+		drm_vblank_put(drm_dev, crtc);
 	}
 
-	if (is_checked)
-		/*
-		 * call drm_vblank_put only in case that drm_vblank_get was
-		 * called.
-		 */
-		if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
-			drm_vblank_put(drm_dev, crtc);
-
 	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
 }
 
@@ -944,6 +1007,12 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 
 		drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
 		mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
+
+		/* set wait vsync event to zero and wake up queue. */
+		if (atomic_read(&ctx->wait_vsync_event)) {
+			atomic_set(&ctx->wait_vsync_event, 0);
+			DRM_WAKEUP(&ctx->wait_vsync_queue);
+		}
 	}
 
 out:
@@ -971,57 +1040,45 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
 
 	spin_lock_init(&mixer_res->reg_slock);
 
-	mixer_res->mixer = clk_get(dev, "mixer");
+	mixer_res->mixer = devm_clk_get(dev, "mixer");
 	if (IS_ERR_OR_NULL(mixer_res->mixer)) {
 		dev_err(dev, "failed to get clock 'mixer'\n");
-		ret = -ENODEV;
-		goto fail;
+		return -ENODEV;
 	}
 
-	mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+	mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
 	if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
 		dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
-		ret = -ENODEV;
-		goto fail;
+		return -ENODEV;
 	}
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (res == NULL) {
 		dev_err(dev, "get memory resource failed.\n");
-		ret = -ENXIO;
-		goto fail;
+		return -ENXIO;
 	}
 
 	mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
 							resource_size(res));
 	if (mixer_res->mixer_regs == NULL) {
 		dev_err(dev, "register mapping failed.\n");
-		ret = -ENXIO;
-		goto fail;
+		return -ENXIO;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (res == NULL) {
 		dev_err(dev, "get interrupt resource failed.\n");
-		ret = -ENXIO;
-		goto fail;
+		return -ENXIO;
 	}
 
 	ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
 							0, "drm_mixer", ctx);
 	if (ret) {
 		dev_err(dev, "request interrupt failed.\n");
-		goto fail;
+		return ret;
 	}
 	mixer_res->irq = res->start;
 
 	return 0;
-
-fail:
-	if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
-		clk_put(mixer_res->sclk_hdmi);
-	if (!IS_ERR_OR_NULL(mixer_res->mixer))
-		clk_put(mixer_res->mixer);
-	return ret;
 }
 
 static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
@@ -1031,25 +1088,21 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
 	struct device *dev = &pdev->dev;
 	struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
 	struct resource *res;
-	int ret;
 
-	mixer_res->vp = clk_get(dev, "vp");
+	mixer_res->vp = devm_clk_get(dev, "vp");
 	if (IS_ERR_OR_NULL(mixer_res->vp)) {
 		dev_err(dev, "failed to get clock 'vp'\n");
-		ret = -ENODEV;
-		goto fail;
+		return -ENODEV;
 	}
-	mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
+	mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
 	if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
 		dev_err(dev, "failed to get clock 'sclk_mixer'\n");
-		ret = -ENODEV;
-		goto fail;
+		return -ENODEV;
 	}
-	mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
+	mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
 	if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
 		dev_err(dev, "failed to get clock 'sclk_dac'\n");
-		ret = -ENODEV;
-		goto fail;
+		return -ENODEV;
 	}
 
 	if (mixer_res->sclk_hdmi)
@@ -1058,28 +1111,17 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	if (res == NULL) {
 		dev_err(dev, "get memory resource failed.\n");
-		ret = -ENXIO;
-		goto fail;
+		return -ENXIO;
 	}
 
 	mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
 							resource_size(res));
 	if (mixer_res->vp_regs == NULL) {
 		dev_err(dev, "register mapping failed.\n");
-		ret = -ENXIO;
-		goto fail;
+		return -ENXIO;
 	}
 
 	return 0;
-
-fail:
-	if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
-		clk_put(mixer_res->sclk_dac);
-	if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
-		clk_put(mixer_res->sclk_mixer);
-	if (!IS_ERR_OR_NULL(mixer_res->vp))
-		clk_put(mixer_res->vp);
-	return ret;
 }
 
 static struct mixer_drv_data exynos5_mxr_drv_data = {
@@ -1149,9 +1191,12 @@ static int __devinit mixer_probe(struct platform_device *pdev)
 	}
 
 	ctx->dev = &pdev->dev;
+	ctx->parent_ctx = (void *)drm_hdmi_ctx;
 	drm_hdmi_ctx->ctx = (void *)ctx;
 	ctx->vp_enabled = drv->is_vp_enabled;
 	ctx->mxr_ver = drv->version;
+	DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+	atomic_set(&ctx->wait_vsync_event, 0);
 
 	platform_set_drvdata(pdev, drm_hdmi_ctx);
 
@@ -1202,13 +1247,66 @@ static int mixer_suspend(struct device *dev)
 	struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
 	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
 
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	if (pm_runtime_suspended(dev)) {
+		DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+		return 0;
+	}
+
 	mixer_poweroff(ctx);
 
 	return 0;
 }
+
+static int mixer_resume(struct device *dev)
+{
+	struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	if (!pm_runtime_suspended(dev)) {
+		DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+		return 0;
+	}
+
+	mixer_poweron(ctx);
+
+	return 0;
+}
 #endif
 
-static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL);
+#ifdef CONFIG_PM_RUNTIME
+static int mixer_runtime_suspend(struct device *dev)
+{
+	struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	mixer_poweroff(ctx);
+
+	return 0;
+}
+
+static int mixer_runtime_resume(struct device *dev)
+{
+	struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	mixer_poweron(ctx);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops mixer_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mixer_suspend, mixer_resume)
+	SET_RUNTIME_PM_OPS(mixer_runtime_suspend, mixer_runtime_resume, NULL)
+};
 
 struct platform_driver mixer_driver = {
 	.driver = {
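
The reworked mixer_wait_for_vblank() above replaces polling of MXR_INT_STATUS with a handshake: the waiter arms an atomic flag and sleeps on a waitqueue, and the vsync interrupt handler clears the flag and wakes it (DRM_WAKEUP() and DRM_HZ are thin DRM wrappers around wake_up() and HZ). A stripped-down sketch of the same handshake, with illustrative names:

#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(vsync_wq);
static atomic_t vsync_pending;

static void wait_one_vblank(void)
{
	atomic_set(&vsync_pending, 1);
	/* Give up after ~50 ms, one frame period at 20 Hz. */
	if (!wait_event_timeout(vsync_wq,
				!atomic_read(&vsync_pending), HZ / 20))
		pr_debug("vblank wait timed out\n");
}

static irqreturn_t vsync_irq(int irq, void *arg)
{
	/* Wake the waiter only if one is actually pending. */
	if (atomic_read(&vsync_pending)) {
		atomic_set(&vsync_pending, 0);
		wake_up(&vsync_wq);
	}
	return IRQ_HANDLED;
}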
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
new file mode 100644
index 000000000000..b4f9ca1fd851
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -0,0 +1,669 @@
+/* drivers/gpu/drm/exynos/regs-fimc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * Register definition file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef EXYNOS_REGS_FIMC_H
+#define EXYNOS_REGS_FIMC_H
+
+/*
+ * Register part
+*/
+/* Input source format */
+#define EXYNOS_CISRCFMT		(0x00)
+/* Window offset */
+#define EXYNOS_CIWDOFST		(0x04)
+/* Global control */
+#define EXYNOS_CIGCTRL		(0x08)
+/* Window offset 2 */
+#define EXYNOS_CIWDOFST2	(0x14)
+/* Y 1st frame start address for output DMA */
+#define EXYNOS_CIOYSA1		(0x18)
+/* Y 2nd frame start address for output DMA */
+#define EXYNOS_CIOYSA2		(0x1c)
+/* Y 3rd frame start address for output DMA */
+#define EXYNOS_CIOYSA3		(0x20)
+/* Y 4th frame start address for output DMA */
+#define EXYNOS_CIOYSA4		(0x24)
+/* Cb 1st frame start address for output DMA */
+#define EXYNOS_CIOCBSA1		(0x28)
+/* Cb 2nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA2		(0x2c)
+/* Cb 3rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA3		(0x30)
+/* Cb 4th frame start address for output DMA */
+#define EXYNOS_CIOCBSA4		(0x34)
+/* Cr 1st frame start address for output DMA */
+#define EXYNOS_CIOCRSA1		(0x38)
+/* Cr 2nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA2		(0x3c)
+/* Cr 3rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA3		(0x40)
+/* Cr 4th frame start address for output DMA */
+#define EXYNOS_CIOCRSA4		(0x44)
+/* Target image format */
+#define EXYNOS_CITRGFMT		(0x48)
+/* Output DMA control */
+#define EXYNOS_CIOCTRL		(0x4c)
+/* Pre-scaler control 1 */
+#define EXYNOS_CISCPRERATIO	(0x50)
+/* Pre-scaler control 2 */
+#define EXYNOS_CISCPREDST		(0x54)
+/* Main scaler control */
+#define EXYNOS_CISCCTRL		(0x58)
+/* Target area */
+#define EXYNOS_CITAREA		(0x5c)
+/* Status */
+#define EXYNOS_CISTATUS		(0x64)
+/* Status2 */
+#define EXYNOS_CISTATUS2		(0x68)
+/* Image capture enable command */
+#define EXYNOS_CIIMGCPT		(0xc0)
+/* Capture sequence */
+#define EXYNOS_CICPTSEQ		(0xc4)
+/* Image effects */
+#define EXYNOS_CIIMGEFF		(0xd0)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA0		(0xd4)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA0		(0xd8)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA0		(0xdc)
+/* Input DMA Y Line Skip */
+#define EXYNOS_CIILINESKIP_Y	(0xec)
+/* Input DMA Cb Line Skip */
+#define EXYNOS_CIILINESKIP_CB	(0xf0)
+/* Input DMA Cr Line Skip */
+#define EXYNOS_CIILINESKIP_CR	(0xf4)
+/* Real input DMA image size */
+#define EXYNOS_CIREAL_ISIZE	(0xf8)
+/* Input DMA control */
+#define EXYNOS_MSCTRL		(0xfc)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA1		(0x144)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA1		(0x148)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA1		(0x14c)
+/* Output DMA Y offset */
+#define EXYNOS_CIOYOFF		(0x168)
+/* Output DMA CB offset */
+#define EXYNOS_CIOCBOFF		(0x16c)
+/* Output DMA CR offset */
+#define EXYNOS_CIOCROFF		(0x170)
+/* Input DMA Y offset */
+#define EXYNOS_CIIYOFF		(0x174)
+/* Input DMA CB offset */
+#define EXYNOS_CIICBOFF		(0x178)
+/* Input DMA CR offset */
+#define EXYNOS_CIICROFF		(0x17c)
+/* Input DMA original image size */
+#define EXYNOS_ORGISIZE		(0x180)
+/* Output DMA original image size */
+#define EXYNOS_ORGOSIZE		(0x184)
+/* Real output DMA image size */
+#define EXYNOS_CIEXTEN		(0x188)
+/* DMA parameter */
+#define EXYNOS_CIDMAPARAM		(0x18c)
+/* MIPI CSI image format */
+#define EXYNOS_CSIIMGFMT		(0x194)
+/* FIMC Clock Source Select */
+#define EXYNOS_MISC_FIMC		(0x198)
+
+/* Add for FIMC v5.1 */
+/* Output Frame Buffer Sequence */
+#define EXYNOS_CIFCNTSEQ		(0x1fc)
+/* Y 5th frame start address for output DMA */
+#define EXYNOS_CIOYSA5		(0x200)
+/* Y 6th frame start address for output DMA */
+#define EXYNOS_CIOYSA6		(0x204)
+/* Y 7th frame start address for output DMA */
+#define EXYNOS_CIOYSA7		(0x208)
+/* Y 8th frame start address for output DMA */
+#define EXYNOS_CIOYSA8		(0x20c)
+/* Y 9th frame start address for output DMA */
+#define EXYNOS_CIOYSA9		(0x210)
+/* Y 10th frame start address for output DMA */
+#define EXYNOS_CIOYSA10		(0x214)
+/* Y 11th frame start address for output DMA */
+#define EXYNOS_CIOYSA11		(0x218)
+/* Y 12th frame start address for output DMA */
+#define EXYNOS_CIOYSA12		(0x21c)
+/* Y 13th frame start address for output DMA */
+#define EXYNOS_CIOYSA13		(0x220)
+/* Y 14th frame start address for output DMA */
+#define EXYNOS_CIOYSA14		(0x224)
+/* Y 15th frame start address for output DMA */
+#define EXYNOS_CIOYSA15		(0x228)
+/* Y 16th frame start address for output DMA */
+#define EXYNOS_CIOYSA16		(0x22c)
+/* Y 17th frame start address for output DMA */
+#define EXYNOS_CIOYSA17		(0x230)
+/* Y 18th frame start address for output DMA */
+#define EXYNOS_CIOYSA18		(0x234)
+/* Y 19th frame start address for output DMA */
+#define EXYNOS_CIOYSA19		(0x238)
+/* Y 20th frame start address for output DMA */
+#define EXYNOS_CIOYSA20		(0x23c)
+/* Y 21st frame start address for output DMA */
+#define EXYNOS_CIOYSA21		(0x240)
+/* Y 22nd frame start address for output DMA */
+#define EXYNOS_CIOYSA22		(0x244)
+/* Y 23rd frame start address for output DMA */
+#define EXYNOS_CIOYSA23		(0x248)
+/* Y 24th frame start address for output DMA */
+#define EXYNOS_CIOYSA24		(0x24c)
+/* Y 25th frame start address for output DMA */
+#define EXYNOS_CIOYSA25		(0x250)
+/* Y 26th frame start address for output DMA */
+#define EXYNOS_CIOYSA26		(0x254)
+/* Y 27th frame start address for output DMA */
+#define EXYNOS_CIOYSA27		(0x258)
+/* Y 28th frame start address for output DMA */
+#define EXYNOS_CIOYSA28		(0x25c)
+/* Y 29th frame start address for output DMA */
+#define EXYNOS_CIOYSA29		(0x260)
+/* Y 30th frame start address for output DMA */
+#define EXYNOS_CIOYSA30		(0x264)
+/* Y 31st frame start address for output DMA */
+#define EXYNOS_CIOYSA31		(0x268)
+/* Y 32nd frame start address for output DMA */
+#define EXYNOS_CIOYSA32		(0x26c)
+
+/* CB 5th frame start address for output DMA */
+#define EXYNOS_CIOCBSA5		(0x270)
+/* CB 6th frame start address for output DMA */
+#define EXYNOS_CIOCBSA6		(0x274)
+/* CB 7th frame start address for output DMA */
+#define EXYNOS_CIOCBSA7		(0x278)
+/* CB 8th frame start address for output DMA */
+#define EXYNOS_CIOCBSA8		(0x27c)
+/* CB 9th frame start address for output DMA */
+#define EXYNOS_CIOCBSA9		(0x280)
+/* CB 10th frame start address for output DMA */
+#define EXYNOS_CIOCBSA10		(0x284)
+/* CB 11th frame start address for output DMA */
+#define EXYNOS_CIOCBSA11		(0x288)
+/* CB 12th frame start address for output DMA */
+#define EXYNOS_CIOCBSA12		(0x28c)
+/* CB 13th frame start address for output DMA */
+#define EXYNOS_CIOCBSA13		(0x290)
+/* CB 14th frame start address for output DMA */
+#define EXYNOS_CIOCBSA14		(0x294)
+/* CB 15th frame start address for output DMA */
+#define EXYNOS_CIOCBSA15		(0x298)
+/* CB 16th frame start address for output DMA */
+#define EXYNOS_CIOCBSA16		(0x29c)
+/* CB 17th frame start address for output DMA */
+#define EXYNOS_CIOCBSA17		(0x2a0)
+/* CB 18th frame start address for output DMA */
+#define EXYNOS_CIOCBSA18		(0x2a4)
+/* CB 19th frame start address for output DMA */
+#define EXYNOS_CIOCBSA19		(0x2a8)
+/* CB 20th frame start address for output DMA */
+#define EXYNOS_CIOCBSA20		(0x2ac)
+/* CB 21st frame start address for output DMA */
+#define EXYNOS_CIOCBSA21		(0x2b0)
+/* CB 22nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA22		(0x2b4)
+/* CB 23rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA23		(0x2b8)
+/* CB 24th frame start address for output DMA */
+#define EXYNOS_CIOCBSA24		(0x2bc)
+/* CB 25th frame start address for output DMA */
+#define EXYNOS_CIOCBSA25		(0x2c0)
+/* CB 26th frame start address for output DMA */
+#define EXYNOS_CIOCBSA26		(0x2c4)
+/* CB 27th frame start address for output DMA */
+#define EXYNOS_CIOCBSA27		(0x2c8)
+/* CB 28th frame start address for output DMA */
+#define EXYNOS_CIOCBSA28		(0x2cc)
+/* CB 29th frame start address for output DMA */
+#define EXYNOS_CIOCBSA29		(0x2d0)
+/* CB 30th frame start address for output DMA */
+#define EXYNOS_CIOCBSA30		(0x2d4)
+/* CB 31st frame start address for output DMA */
+#define EXYNOS_CIOCBSA31		(0x2d8)
+/* CB 32nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA32		(0x2dc)
+
+/* CR 5th frame start address for output DMA */
+#define EXYNOS_CIOCRSA5		(0x2e0)
+/* CR 6th frame start address for output DMA */
+#define EXYNOS_CIOCRSA6		(0x2e4)
+/* CR 7th frame start address for output DMA */
+#define EXYNOS_CIOCRSA7		(0x2e8)
+/* CR 8th frame start address for output DMA */
+#define EXYNOS_CIOCRSA8		(0x2ec)
+/* CR 9th frame start address for output DMA */
+#define EXYNOS_CIOCRSA9		(0x2f0)
+/* CR 10th frame start address for output DMA */
+#define EXYNOS_CIOCRSA10		(0x2f4)
+/* CR 11th frame start address for output DMA */
+#define EXYNOS_CIOCRSA11		(0x2f8)
+/* CR 12th frame start address for output DMA */
+#define EXYNOS_CIOCRSA12		(0x2fc)
+/* CR 13th frame start address for output DMA */
+#define EXYNOS_CIOCRSA13		(0x300)
+/* CR 14th frame start address for output DMA */
+#define EXYNOS_CIOCRSA14		(0x304)
+/* CR 15th frame start address for output DMA */
+#define EXYNOS_CIOCRSA15		(0x308)
+/* CR 16th frame start address for output DMA */
+#define EXYNOS_CIOCRSA16		(0x30c)
+/* CR 17th frame start address for output DMA */
+#define EXYNOS_CIOCRSA17		(0x310)
+/* CR 18th frame start address for output DMA */
+#define EXYNOS_CIOCRSA18		(0x314)
+/* CR 19th frame start address for output DMA */
+#define EXYNOS_CIOCRSA19		(0x318)
+/* CR 20th frame start address for output DMA */
+#define EXYNOS_CIOCRSA20		(0x31c)
+/* CR 21st frame start address for output DMA */
+#define EXYNOS_CIOCRSA21		(0x320)
+/* CR 22nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA22		(0x324)
+/* CR 23rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA23		(0x328)
+/* CR 24th frame start address for output DMA */
+#define EXYNOS_CIOCRSA24		(0x32c)
+/* CR 25th frame start address for output DMA */
+#define EXYNOS_CIOCRSA25		(0x330)
+/* CR 26th frame start address for output DMA */
+#define EXYNOS_CIOCRSA26		(0x334)
+/* CR 27th frame start address for output DMA */
+#define EXYNOS_CIOCRSA27		(0x338)
+/* CR 28th frame start address for output DMA */
+#define EXYNOS_CIOCRSA28		(0x33c)
+/* CR 29th frame start address for output DMA */
+#define EXYNOS_CIOCRSA29		(0x340)
+/* CR 30th frame start address for output DMA */
+#define EXYNOS_CIOCRSA30		(0x344)
+/* CR 31st frame start address for output DMA */
+#define EXYNOS_CIOCRSA31		(0x348)
+/* CR 32nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA32		(0x34c)
+
+/*
+ * Macro part
+*/
+/* frame start address 1 ~ 4, 5 ~ 32 */
+/* Number of Default Output PingPong Memory */
+#define DEF_PP		4
+#define EXYNOS_CIOYSA(__x)		\
+	(((__x) < DEF_PP) ?	\
+	 (EXYNOS_CIOYSA1  + (__x) * 4) : \
+	(EXYNOS_CIOYSA5  + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCBSA(__x)	\
+	(((__x) < DEF_PP) ?	\
+	 (EXYNOS_CIOCBSA1 + (__x) * 4) : \
+	(EXYNOS_CIOCBSA5 + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCRSA(__x)	\
+	(((__x) < DEF_PP) ?	\
+	 (EXYNOS_CIOCRSA1 + (__x) * 4) : \
+	(EXYNOS_CIOCRSA5 + ((__x) - DEF_PP) * 4))
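+/* e.g. EXYNOS_CIOYSA(0) is EXYNOS_CIOYSA1 (0x18) and EXYNOS_CIOYSA(4)
+ * is EXYNOS_CIOYSA5 (0x200): the index hides the split register banks */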
+/* Number of Default Input PingPong Memory */
+#define DEF_IPP		1
+#define EXYNOS_CIIYSA(__x)		\
+	(((__x) < DEF_IPP) ?	\
+	 (EXYNOS_CIIYSA0) : (EXYNOS_CIIYSA1))
+#define EXYNOS_CIICBSA(__x)	\
+	(((__x) < DEF_IPP) ?	\
+	 (EXYNOS_CIICBSA0) : (EXYNOS_CIICBSA1))
+#define EXYNOS_CIICRSA(__x)	\
+	(((__x) < DEF_IPP) ?	\
+	 (EXYNOS_CIICRSA0) : (EXYNOS_CIICRSA1))
+
+#define EXYNOS_CISRCFMT_SOURCEHSIZE(x)		((x) << 16)
+#define EXYNOS_CISRCFMT_SOURCEVSIZE(x)		((x) << 0)
+
+#define EXYNOS_CIWDOFST_WINHOROFST(x)		((x) << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST(x)		((x) << 0)
+
+#define EXYNOS_CIWDOFST2_WINHOROFST2(x)		((x) << 16)
+#define EXYNOS_CIWDOFST2_WINVEROFST2(x)		((x) << 0)
+
+#define EXYNOS_CITRGFMT_TARGETHSIZE(x)		(((x) & 0x1fff) << 16)
+#define EXYNOS_CITRGFMT_TARGETVSIZE(x)		(((x) & 0x1fff) << 0)
+
+#define EXYNOS_CISCPRERATIO_SHFACTOR(x)		((x) << 28)
+#define EXYNOS_CISCPRERATIO_PREHORRATIO(x)		((x) << 16)
+#define EXYNOS_CISCPRERATIO_PREVERRATIO(x)		((x) << 0)
+
+#define EXYNOS_CISCPREDST_PREDSTWIDTH(x)		((x) << 16)
+#define EXYNOS_CISCPREDST_PREDSTHEIGHT(x)		((x) << 0)
+
+#define EXYNOS_CISCCTRL_MAINHORRATIO(x)		((x) << 16)
+#define EXYNOS_CISCCTRL_MAINVERRATIO(x)		((x) << 0)
+
+#define EXYNOS_CITAREA_TARGET_AREA(x)		((x) << 0)
+
+#define EXYNOS_CISTATUS_GET_FRAME_COUNT(x)		(((x) >> 26) & 0x3)
+#define EXYNOS_CISTATUS_GET_FRAME_END(x)		(((x) >> 17) & 0x1)
+#define EXYNOS_CISTATUS_GET_LAST_CAPTURE_END(x)	(((x) >> 16) & 0x1)
+#define EXYNOS_CISTATUS_GET_LCD_STATUS(x)		(((x) >> 9) & 0x1)
+#define EXYNOS_CISTATUS_GET_ENVID_STATUS(x)	(((x) >> 8) & 0x1)
+
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(x)	(((x) >> 7) & 0x3f)
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(x)	((x) & 0x3f)
+
+#define EXYNOS_CIIMGEFF_FIN(x)			(((x) & 0x7) << 26)
+#define EXYNOS_CIIMGEFF_PAT_CB(x)			((x) << 13)
+#define EXYNOS_CIIMGEFF_PAT_CR(x)			((x) << 0)
+
+#define EXYNOS_CIILINESKIP(x)			(((x) & 0xf) << 24)
+
+#define EXYNOS_CIREAL_ISIZE_HEIGHT(x)		((x) << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH(x)		((x) << 0)
+
+#define EXYNOS_MSCTRL_SUCCESSIVE_COUNT(x)		((x) << 24)
+#define EXYNOS_MSCTRL_GET_INDMA_STATUS(x)		((x) & 0x1)
+
+#define EXYNOS_CIOYOFF_VERTICAL(x)			((x) << 16)
+#define EXYNOS_CIOYOFF_HORIZONTAL(x)		((x) << 0)
+
+#define EXYNOS_CIOCBOFF_VERTICAL(x)		((x) << 16)
+#define EXYNOS_CIOCBOFF_HORIZONTAL(x)		((x) << 0)
+
+#define EXYNOS_CIOCROFF_VERTICAL(x)		((x) << 16)
+#define EXYNOS_CIOCROFF_HORIZONTAL(x)		((x) << 0)
+
+#define EXYNOS_CIIYOFF_VERTICAL(x)			((x) << 16)
+#define EXYNOS_CIIYOFF_HORIZONTAL(x)		((x) << 0)
+
+#define EXYNOS_CIICBOFF_VERTICAL(x)		((x) << 16)
+#define EXYNOS_CIICBOFF_HORIZONTAL(x)		((x) << 0)
+
+#define EXYNOS_CIICROFF_VERTICAL(x)		((x) << 16)
+#define EXYNOS_CIICROFF_HORIZONTAL(x)		((x) << 0)
+
+#define EXYNOS_ORGISIZE_VERTICAL(x)		((x) << 16)
+#define EXYNOS_ORGISIZE_HORIZONTAL(x)		((x) << 0)
+
+#define EXYNOS_ORGOSIZE_VERTICAL(x)		((x) << 16)
+#define EXYNOS_ORGOSIZE_HORIZONTAL(x)		((x) << 0)
+
+#define EXYNOS_CIEXTEN_TARGETH_EXT(x)		((((x) & 0x2000) >> 13) << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT(x)		((((x) & 0x2000) >> 13) << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT(x)		(((x) & 0x3F) << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT(x)		((x) & 0x3F)
+
+/*
+ * Bit definition part
+*/
+/* Source format register */
+#define EXYNOS_CISRCFMT_ITU601_8BIT		(1 << 31)
+#define EXYNOS_CISRCFMT_ITU656_8BIT		(0 << 31)
+#define EXYNOS_CISRCFMT_ITU601_16BIT		(1 << 29)
+#define EXYNOS_CISRCFMT_ORDER422_YCBYCR		(0 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_YCRYCB		(1 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CBYCRY		(2 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CRYCBY		(3 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CBCRCBCR	(0 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CRCBCRCB	(1 << 14)
+
+/* Window offset register */
+#define EXYNOS_CIWDOFST_WINOFSEN			(1 << 31)
+#define EXYNOS_CIWDOFST_CLROVFIY			(1 << 30)
+#define EXYNOS_CIWDOFST_CLROVRLB			(1 << 29)
+#define EXYNOS_CIWDOFST_WINHOROFST_MASK		(0x7ff << 16)
+#define EXYNOS_CIWDOFST_CLROVFICB			(1 << 15)
+#define EXYNOS_CIWDOFST_CLROVFICR			(1 << 14)
+#define EXYNOS_CIWDOFST_WINVEROFST_MASK		(0xfff << 0)
+
+/* Global control register */
+#define EXYNOS_CIGCTRL_SWRST			(1 << 31)
+#define EXYNOS_CIGCTRL_CAMRST_A			(1 << 30)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_B		(0 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_A		(1 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_MASK		(1 << 29)
+#define EXYNOS_CIGCTRL_TESTPATTERN_NORMAL		(0 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR	(1 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_HOR_INC		(2 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_VER_INC		(3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_MASK		(3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_SHIFT		(27)
+#define EXYNOS_CIGCTRL_INVPOLPCLK			(1 << 26)
+#define EXYNOS_CIGCTRL_INVPOLVSYNC			(1 << 25)
+#define EXYNOS_CIGCTRL_INVPOLHREF			(1 << 24)
+#define EXYNOS_CIGCTRL_IRQ_OVFEN			(1 << 22)
+#define EXYNOS_CIGCTRL_HREF_MASK			(1 << 21)
+#define EXYNOS_CIGCTRL_IRQ_EDGE			(0 << 20)
+#define EXYNOS_CIGCTRL_IRQ_LEVEL			(1 << 20)
+#define EXYNOS_CIGCTRL_IRQ_CLR			(1 << 19)
+#define EXYNOS_CIGCTRL_IRQ_END_DISABLE		(1 << 18)
+#define EXYNOS_CIGCTRL_IRQ_DISABLE			(0 << 16)
+#define EXYNOS_CIGCTRL_IRQ_ENABLE			(1 << 16)
+#define EXYNOS_CIGCTRL_SHADOW_DISABLE		(1 << 12)
+#define EXYNOS_CIGCTRL_CAM_JPEG			(1 << 8)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_B		(0 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_A		(1 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_MASK		(1 << 7)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_CAMERA	(0 << 6)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK	(1 << 6)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_MASK		(1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_A		(1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_B		(0 << 10)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_MASK		(1 << 6)
+#define EXYNOS_CIGCTRL_CSC_ITU601			(0 << 5)
+#define EXYNOS_CIGCTRL_CSC_ITU709			(1 << 5)
+#define EXYNOS_CIGCTRL_CSC_MASK			(1 << 5)
+#define EXYNOS_CIGCTRL_INVPOLHSYNC			(1 << 4)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_ITU		(0 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MIPI		(1 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MASK		(1 << 3)
+#define EXYNOS_CIGCTRL_PROGRESSIVE			(0 << 0)
+#define EXYNOS_CIGCTRL_INTERLACE			(1 << 0)
+
+/* Window offset2 register */
+#define EXYNOS_CIWDOFST_WINHOROFST2_MASK		(0xfff << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST2_MASK		(0xfff << 0)
+
+/* Target format register */
+#define EXYNOS_CITRGFMT_INROT90_CLOCKWISE		(1 << 31)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420		(0 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422		(1 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE	(2 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_RGB		(3 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_MASK		(3 << 29)
+#define EXYNOS_CITRGFMT_FLIP_SHIFT			(14)
+#define EXYNOS_CITRGFMT_FLIP_NORMAL		(0 << 14)
+#define EXYNOS_CITRGFMT_FLIP_X_MIRROR		(1 << 14)
+#define EXYNOS_CITRGFMT_FLIP_Y_MIRROR		(2 << 14)
+#define EXYNOS_CITRGFMT_FLIP_180			(3 << 14)
+#define EXYNOS_CITRGFMT_FLIP_MASK			(3 << 14)
+#define EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE		(1 << 13)
+#define EXYNOS_CITRGFMT_TARGETV_MASK		(0x1fff << 0)
+#define EXYNOS_CITRGFMT_TARGETH_MASK		(0x1fff << 16)
+
+/* Output DMA control register */
+#define EXYNOS_CIOCTRL_WEAVE_OUT			(1 << 31)
+#define EXYNOS_CIOCTRL_WEAVE_MASK			(1 << 31)
+#define EXYNOS_CIOCTRL_LASTENDEN			(1 << 30)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR		(0 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB		(1 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CRCB		(2 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CBCR		(3 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_SHIFT		(24)
+#define EXYNOS_CIOCTRL_ORDER2P_MASK		(3 << 24)
+#define EXYNOS_CIOCTRL_YCBCR_3PLANE		(0 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_2PLANE		(1 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_PLANE_MASK		(1 << 3)
+#define EXYNOS_CIOCTRL_LASTIRQ_ENABLE		(1 << 2)
+#define EXYNOS_CIOCTRL_ALPHA_OUT			(0xff << 4)
+#define EXYNOS_CIOCTRL_ORDER422_YCBYCR		(0 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_YCRYCB		(1 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CBYCRY		(2 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CRYCBY		(3 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_MASK		(3 << 0)
+
+/* Main scaler control register */
+#define EXYNOS_CISCCTRL_SCALERBYPASS		(1 << 31)
+#define EXYNOS_CISCCTRL_SCALEUP_H			(1 << 30)
+#define EXYNOS_CISCCTRL_SCALEUP_V			(1 << 29)
+#define EXYNOS_CISCCTRL_CSCR2Y_NARROW		(0 << 28)
+#define EXYNOS_CISCCTRL_CSCR2Y_WIDE		(1 << 28)
+#define EXYNOS_CISCCTRL_CSCY2R_NARROW		(0 << 27)
+#define EXYNOS_CISCCTRL_CSCY2R_WIDE		(1 << 27)
+#define EXYNOS_CISCCTRL_LCDPATHEN_FIFO		(1 << 26)
+#define EXYNOS_CISCCTRL_PROGRESSIVE		(0 << 25)
+#define EXYNOS_CISCCTRL_INTERLACE			(1 << 25)
+#define EXYNOS_CISCCTRL_SCAN_MASK			(1 << 25)
+#define EXYNOS_CISCCTRL_SCALERSTART		(1 << 15)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB565		(0 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB666		(1 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB888		(2 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK		(3 << 13)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565		(0 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB666		(1 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888		(2 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK	(3 << 11)
+#define EXYNOS_CISCCTRL_EXTRGB_NORMAL		(0 << 10)
+#define EXYNOS_CISCCTRL_EXTRGB_EXTENSION		(1 << 10)
+#define EXYNOS_CISCCTRL_ONE2ONE			(1 << 9)
+#define EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK		(0x1ff << 0)
+#define EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK		(0x1ff << 16)
+
+/* Status register */
+#define EXYNOS_CISTATUS_OVFIY			(1 << 31)
+#define EXYNOS_CISTATUS_OVFICB			(1 << 30)
+#define EXYNOS_CISTATUS_OVFICR			(1 << 29)
+#define EXYNOS_CISTATUS_VSYNC			(1 << 28)
+#define EXYNOS_CISTATUS_SCALERSTART		(1 << 26)
+#define EXYNOS_CISTATUS_WINOFSTEN			(1 << 25)
+#define EXYNOS_CISTATUS_IMGCPTEN			(1 << 22)
+#define EXYNOS_CISTATUS_IMGCPTENSC			(1 << 21)
+#define EXYNOS_CISTATUS_VSYNC_A			(1 << 20)
+#define EXYNOS_CISTATUS_VSYNC_B			(1 << 19)
+#define EXYNOS_CISTATUS_OVRLB			(1 << 18)
+#define EXYNOS_CISTATUS_FRAMEEND			(1 << 17)
+#define EXYNOS_CISTATUS_LASTCAPTUREEND		(1 << 16)
+#define EXYNOS_CISTATUS_VVALID_A			(1 << 15)
+#define EXYNOS_CISTATUS_VVALID_B			(1 << 14)
+
+/* Image capture enable register */
+#define EXYNOS_CIIMGCPT_IMGCPTEN			(1 << 31)
+#define EXYNOS_CIIMGCPT_IMGCPTEN_SC		(1 << 30)
+#define EXYNOS_CIIMGCPT_CPT_FREN_ENABLE		(1 << 25)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_EN		(0 << 18)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_CNT		(1 << 18)
+
+/* Image effects register */
+#define EXYNOS_CIIMGEFF_IE_DISABLE			(0 << 30)
+#define EXYNOS_CIIMGEFF_IE_ENABLE			(1 << 30)
+#define EXYNOS_CIIMGEFF_IE_SC_BEFORE		(0 << 29)
+#define EXYNOS_CIIMGEFF_IE_SC_AFTER		(1 << 29)
+#define EXYNOS_CIIMGEFF_FIN_BYPASS			(0 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARBITRARY		(1 << 26)
+#define EXYNOS_CIIMGEFF_FIN_NEGATIVE		(2 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARTFREEZE		(3 << 26)
+#define EXYNOS_CIIMGEFF_FIN_EMBOSSING		(4 << 26)
+#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE		(5 << 26)
+#define EXYNOS_CIIMGEFF_FIN_MASK			(7 << 26)
+#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK		((0xff << 13) | (0xff << 0))
+
+/* Real input DMA size register */
+#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE	(1 << 31)
+#define EXYNOS_CIREAL_ISIZE_ADDR_CH_DISABLE	(1 << 30)
+#define EXYNOS_CIREAL_ISIZE_HEIGHT_MASK		(0x3FFF << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH_MASK		(0x3FFF << 0)
+
+/* Input DMA control register */
+#define EXYNOS_MSCTRL_FIELD_MASK			(1 << 31)
+#define EXYNOS_MSCTRL_FIELD_WEAVE			(1 << 31)
+#define EXYNOS_MSCTRL_FIELD_NORMAL			(0 << 31)
+#define EXYNOS_MSCTRL_BURST_CNT			(24)
+#define EXYNOS_MSCTRL_BURST_CNT_MASK		(0xf << 24)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CBCR		(0 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CRCB		(1 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CRCB		(2 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CBCR		(3 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT		(16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK		(0x3 << 16)
+#define EXYNOS_MSCTRL_C_INT_IN_3PLANE		(0 << 15)
+#define EXYNOS_MSCTRL_C_INT_IN_2PLANE		(1 << 15)
+#define EXYNOS_MSCTRL_FLIP_SHIFT			(13)
+#define EXYNOS_MSCTRL_FLIP_NORMAL			(0 << 13)
+#define EXYNOS_MSCTRL_FLIP_X_MIRROR		(1 << 13)
+#define EXYNOS_MSCTRL_FLIP_Y_MIRROR		(2 << 13)
+#define EXYNOS_MSCTRL_FLIP_180			(3 << 13)
+#define EXYNOS_MSCTRL_FLIP_MASK			(3 << 13)
+#define EXYNOS_MSCTRL_ORDER422_CRYCBY		(0 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCRYCB		(1 << 4)
+#define EXYNOS_MSCTRL_ORDER422_CBYCRY		(2 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCBYCR		(3 << 4)
+#define EXYNOS_MSCTRL_INPUT_EXTCAM			(0 << 3)
+#define EXYNOS_MSCTRL_INPUT_MEMORY			(1 << 3)
+#define EXYNOS_MSCTRL_INPUT_MASK			(1 << 3)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR420		(0 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422		(1 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE	(2 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_RGB			(3 << 1)
+#define EXYNOS_MSCTRL_ENVID			(1 << 0)
+
+/* DMA parameter register */
+#define EXYNOS_CIDMAPARAM_R_MODE_LINEAR		(0 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_CONFTILE		(1 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_16X16		(2 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_64X32		(3 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_MASK		(3 << 29)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_64		(0 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_128		(1 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_256		(2 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_512		(3 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_1024	(4 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_2048	(5 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_4096	(6 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_1		(0 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_2		(1 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_4		(2 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_8		(3 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_16		(4 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_32		(5 << 20)
+#define EXYNOS_CIDMAPARAM_W_MODE_LINEAR		(0 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_CONFTILE		(1 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_16X16		(2 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_64X32		(3 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_MASK		(3 << 13)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_64		(0 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_128		(1 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_256		(2 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_512		(3 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_1024	(4 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_2048	(5 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_4096	(6 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_1		(0 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_2		(1 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_4		(2 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_8		(3 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_16		(4 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_32		(5 << 4)
+
+/* Gathering Extension register */
+#define EXYNOS_CIEXTEN_TARGETH_EXT_MASK		(1 << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT_MASK		(1 << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK	(0x3F << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK	(0x3F)
+#define EXYNOS_CIEXTEN_YUV444_OUT			(1 << 22)
+
+/* FIMC Clock Source Select register */
+#define EXYNOS_CLKSRC_HCLK				(0 << 1)
+#define EXYNOS_CLKSRC_HCLK_MASK			(1 << 1)
+#define EXYNOS_CLKSRC_SCLK				(1 << 1)
+
+/* SYSREG for FIMC writeback */
+#define SYSREG_CAMERA_BLK			(S3C_VA_SYS + 0x0218)
+#define SYSREG_ISP_BLK				(S3C_VA_SYS + 0x020c)
+#define SYSREG_FIMD0WB_DEST_MASK	(0x3 << 23)
+#define SYSREG_FIMD0WB_DEST_SHIFT	23
+
+#endif /* EXYNOS_REGS_FIMC_H */
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
new file mode 100644
index 000000000000..9ad592707aaf
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -0,0 +1,284 @@
+/* linux/drivers/gpu/drm/exynos/regs-gsc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Register definition file for Samsung G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_GSC_H_
+#define EXYNOS_REGS_GSC_H_
+
+/* G-Scaler enable */
+#define GSC_ENABLE			0x00
+#define GSC_ENABLE_PP_UPDATE_TIME_MASK	(1 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_CURR	(0 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_EOPAS	(1 << 9)
+#define GSC_ENABLE_CLK_GATE_MODE_MASK	(1 << 8)
+#define GSC_ENABLE_CLK_GATE_MODE_FREE	(1 << 8)
+#define GSC_ENABLE_IPC_MODE_MASK	(1 << 7)
+#define GSC_ENABLE_NORM_MODE		(0 << 7)
+#define GSC_ENABLE_IPC_MODE		(1 << 7)
+#define GSC_ENABLE_PP_UPDATE_MODE_MASK	(1 << 6)
+#define GSC_ENABLE_PP_UPDATE_FIRE_MODE	(1 << 6)
+#define GSC_ENABLE_IN_PP_UPDATE		(1 << 5)
+#define GSC_ENABLE_ON_CLEAR_MASK	(1 << 4)
+#define GSC_ENABLE_ON_CLEAR_ONESHOT	(1 << 4)
+#define GSC_ENABLE_QOS_ENABLE		(1 << 3)
+#define GSC_ENABLE_OP_STATUS		(1 << 2)
+#define GSC_ENABLE_SFR_UPDATE		(1 << 1)
+#define GSC_ENABLE_ON			(1 << 0)
+
+/* G-Scaler S/W reset */
+#define GSC_SW_RESET			0x04
+#define GSC_SW_RESET_SRESET		(1 << 0)
+
+/* G-Scaler IRQ */
+#define GSC_IRQ				0x08
+#define GSC_IRQ_STATUS_OR_IRQ		(1 << 17)
+#define GSC_IRQ_STATUS_OR_FRM_DONE	(1 << 16)
+#define GSC_IRQ_OR_MASK			(1 << 2)
+#define GSC_IRQ_FRMDONE_MASK		(1 << 1)
+#define GSC_IRQ_ENABLE			(1 << 0)
+
+/* G-Scaler input control */
+#define GSC_IN_CON			0x10
+#define GSC_IN_CHROM_STRIDE_SEL_MASK	(1 << 20)
+#define GSC_IN_CHROM_STRIDE_SEPAR	(1 << 20)
+#define GSC_IN_RB_SWAP_MASK		(1 << 19)
+#define GSC_IN_RB_SWAP			(1 << 19)
+#define GSC_IN_ROT_MASK			(7 << 16)
+#define GSC_IN_ROT_270			(7 << 16)
+#define GSC_IN_ROT_90_YFLIP		(6 << 16)
+#define GSC_IN_ROT_90_XFLIP		(5 << 16)
+#define GSC_IN_ROT_90			(4 << 16)
+#define GSC_IN_ROT_180			(3 << 16)
+#define GSC_IN_ROT_YFLIP		(2 << 16)
+#define GSC_IN_ROT_XFLIP		(1 << 16)
+#define GSC_IN_RGB_TYPE_MASK		(3 << 14)
+#define GSC_IN_RGB_HD_WIDE		(3 << 14)
+#define GSC_IN_RGB_HD_NARROW		(2 << 14)
+#define GSC_IN_RGB_SD_WIDE		(1 << 14)
+#define GSC_IN_RGB_SD_NARROW		(0 << 14)
+#define GSC_IN_YUV422_1P_ORDER_MASK	(1 << 13)
+#define GSC_IN_YUV422_1P_ORDER_LSB_Y	(0 << 13)
+#define GSC_IN_YUV422_1P_OEDER_LSB_C	(1 << 13)
+#define GSC_IN_CHROMA_ORDER_MASK	(1 << 12)
+#define GSC_IN_CHROMA_ORDER_CBCR	(0 << 12)
+#define GSC_IN_CHROMA_ORDER_CRCB	(1 << 12)
+#define GSC_IN_FORMAT_MASK		(7 << 8)
+#define GSC_IN_XRGB8888			(0 << 8)
+#define GSC_IN_RGB565			(1 << 8)
+#define GSC_IN_YUV420_2P		(2 << 8)
+#define GSC_IN_YUV420_3P		(3 << 8)
+#define GSC_IN_YUV422_1P		(4 << 8)
+#define GSC_IN_YUV422_2P		(5 << 8)
+#define GSC_IN_YUV422_3P		(6 << 8)
+#define GSC_IN_TILE_TYPE_MASK		(1 << 4)
+#define GSC_IN_TILE_C_16x8		(0 << 4)
+#define GSC_IN_TILE_C_16x16		(1 << 4)
+#define GSC_IN_TILE_MODE		(1 << 3)
+#define GSC_IN_LOCAL_SEL_MASK		(3 << 1)
+#define GSC_IN_LOCAL_CAM3		(3 << 1)
+#define GSC_IN_LOCAL_FIMD_WB		(2 << 1)
+#define GSC_IN_LOCAL_CAM1		(1 << 1)
+#define GSC_IN_LOCAL_CAM0		(0 << 1)
+#define GSC_IN_PATH_MASK		(1 << 0)
+#define GSC_IN_PATH_LOCAL		(1 << 0)
+#define GSC_IN_PATH_MEMORY		(0 << 0)
+
+/* G-Scaler source image size */
+#define GSC_SRCIMG_SIZE			0x14
+#define GSC_SRCIMG_HEIGHT_MASK		(0x1fff << 16)
+#define GSC_SRCIMG_HEIGHT(x)		((x) << 16)
+#define GSC_SRCIMG_WIDTH_MASK		(0x3fff << 0)
+#define GSC_SRCIMG_WIDTH(x)		((x) << 0)
+
+/* G-Scaler source image offset */
+#define GSC_SRCIMG_OFFSET		0x18
+#define GSC_SRCIMG_OFFSET_Y_MASK	(0x1fff << 16)
+#define GSC_SRCIMG_OFFSET_Y(x)		((x) << 16)
+#define GSC_SRCIMG_OFFSET_X_MASK	(0x1fff << 0)
+#define GSC_SRCIMG_OFFSET_X(x)		((x) << 0)
+
+/* G-Scaler cropped source image size */
+#define GSC_CROPPED_SIZE		0x1C
+#define GSC_CROPPED_HEIGHT_MASK		(0x1fff << 16)
+#define GSC_CROPPED_HEIGHT(x)		((x) << 16)
+#define GSC_CROPPED_WIDTH_MASK		(0x1fff << 0)
+#define GSC_CROPPED_WIDTH(x)		((x) << 0)
+
+/* G-Scaler output control */
+#define GSC_OUT_CON			0x20
+#define GSC_OUT_GLOBAL_ALPHA_MASK	(0xff << 24)
+#define GSC_OUT_GLOBAL_ALPHA(x)		((x) << 24)
+#define GSC_OUT_CHROM_STRIDE_SEL_MASK	(1 << 13)
+#define GSC_OUT_CHROM_STRIDE_SEPAR	(1 << 13)
+#define GSC_OUT_RB_SWAP_MASK		(1 << 12)
+#define GSC_OUT_RB_SWAP			(1 << 12)
+#define GSC_OUT_RGB_TYPE_MASK		(3 << 10)
+#define GSC_OUT_RGB_HD_NARROW		(3 << 10)
+#define GSC_OUT_RGB_HD_WIDE		(2 << 10)
+#define GSC_OUT_RGB_SD_NARROW		(1 << 10)
+#define GSC_OUT_RGB_SD_WIDE		(0 << 10)
+#define GSC_OUT_YUV422_1P_ORDER_MASK	(1 << 9)
+#define GSC_OUT_YUV422_1P_ORDER_LSB_Y	(0 << 9)
+#define GSC_OUT_YUV422_1P_OEDER_LSB_C	(1 << 9)
+#define GSC_OUT_CHROMA_ORDER_MASK	(1 << 8)
+#define GSC_OUT_CHROMA_ORDER_CBCR	(0 << 8)
+#define GSC_OUT_CHROMA_ORDER_CRCB	(1 << 8)
+#define GSC_OUT_FORMAT_MASK		(7 << 4)
+#define GSC_OUT_XRGB8888		(0 << 4)
+#define GSC_OUT_RGB565			(1 << 4)
+#define GSC_OUT_YUV420_2P		(2 << 4)
+#define GSC_OUT_YUV420_3P		(3 << 4)
+#define GSC_OUT_YUV422_1P		(4 << 4)
+#define GSC_OUT_YUV422_2P		(5 << 4)
+#define GSC_OUT_YUV444			(7 << 4)
+#define GSC_OUT_TILE_TYPE_MASK		(1 << 2)
+#define GSC_OUT_TILE_C_16x8		(0 << 2)
+#define GSC_OUT_TILE_C_16x16		(1 << 2)
+#define GSC_OUT_TILE_MODE		(1 << 1)
+#define GSC_OUT_PATH_MASK		(1 << 0)
+#define GSC_OUT_PATH_LOCAL		(1 << 0)
+#define GSC_OUT_PATH_MEMORY		(0 << 0)
+
+/* G-Scaler scaled destination image size */
+#define GSC_SCALED_SIZE			0x24
+#define GSC_SCALED_HEIGHT_MASK		(0x1fff << 16)
+#define GSC_SCALED_HEIGHT(x)		((x) << 16)
+#define GSC_SCALED_WIDTH_MASK		(0x1fff << 0)
+#define GSC_SCALED_WIDTH(x)		((x) << 0)
+
+/* G-Scaler pre scale ratio */
+#define GSC_PRE_SCALE_RATIO		0x28
+#define GSC_PRESC_SHFACTOR_MASK		(7 << 28)
+#define GSC_PRESC_SHFACTOR(x)		((x) << 28)
+#define GSC_PRESC_V_RATIO_MASK		(7 << 16)
+#define GSC_PRESC_V_RATIO(x)		((x) << 16)
+#define GSC_PRESC_H_RATIO_MASK		(7 << 0)
+#define GSC_PRESC_H_RATIO(x)		((x) << 0)
+
+/* G-Scaler main scale horizontal ratio */
+#define GSC_MAIN_H_RATIO		0x2C
+#define GSC_MAIN_H_RATIO_MASK		(0xfffff << 0)
+#define GSC_MAIN_H_RATIO_VALUE(x)	((x) << 0)
+
+/* G-Scaler main scale vertical ratio */
+#define GSC_MAIN_V_RATIO		0x30
+#define GSC_MAIN_V_RATIO_MASK		(0xfffff << 0)
+#define GSC_MAIN_V_RATIO_VALUE(x)	((x) << 0)
+
+/* G-Scaler input chrominance stride */
+#define GSC_IN_CHROM_STRIDE		0x3C
+#define GSC_IN_CHROM_STRIDE_MASK	(0x3fff << 0)
+#define GSC_IN_CHROM_STRIDE_VALUE(x)	((x) << 0)
+
+/* G-Scaler destination image size */
+#define GSC_DSTIMG_SIZE			0x40
+#define GSC_DSTIMG_HEIGHT_MASK		(0x1fff << 16)
+#define GSC_DSTIMG_HEIGHT(x)		((x) << 16)
+#define GSC_DSTIMG_WIDTH_MASK		(0x1fff << 0)
+#define GSC_DSTIMG_WIDTH(x)		((x) << 0)
+
+/* G-Scaler destination image offset */
+#define GSC_DSTIMG_OFFSET		0x44
+#define GSC_DSTIMG_OFFSET_Y_MASK	(0x1fff << 16)
+#define GSC_DSTIMG_OFFSET_Y(x)		((x) << 16)
+#define GSC_DSTIMG_OFFSET_X_MASK	(0x1fff << 0)
+#define GSC_DSTIMG_OFFSET_X(x)		((x) << 0)
+
+/* G-Scaler output chrominance stride */
+#define GSC_OUT_CHROM_STRIDE		0x48
+#define GSC_OUT_CHROM_STRIDE_MASK	(0x3fff << 0)
+#define GSC_OUT_CHROM_STRIDE_VALUE(x)	((x) << 0)
+
+/* G-Scaler input y address mask */
+#define GSC_IN_BASE_ADDR_Y_MASK		0x4C
+/* G-Scaler input y base address */
+#define GSC_IN_BASE_ADDR_Y(n)		(0x50 + (n) * 0x4)
+/* G-Scaler input y base current address */
+#define GSC_IN_BASE_ADDR_Y_CUR(n)	(0x60 + (n) * 0x4)
+
+/* G-Scaler input cb address mask */
+#define GSC_IN_BASE_ADDR_CB_MASK	0x7C
+/* G-Scaler input cb base address */
+#define GSC_IN_BASE_ADDR_CB(n)		(0x80 + (n) * 0x4)
+/* G-Scaler input cb base current address */
+#define GSC_IN_BASE_ADDR_CB_CUR(n)	(0x90 + (n) * 0x4)
+
+/* G-Scaler input cr address mask */
+#define GSC_IN_BASE_ADDR_CR_MASK	0xAC
+/* G-Scaler input cr base address */
+#define GSC_IN_BASE_ADDR_CR(n)		(0xB0 + (n) * 0x4)
+/* G-Scaler input cr base current address */
+#define GSC_IN_BASE_ADDR_CR_CUR(n)	(0xC0 + (n) * 0x4)
+
+/* G-Scaler input address mask */
+#define GSC_IN_CURR_ADDR_INDEX	(0xf << 24)
+#define GSC_IN_CURR_GET_INDEX(x)	((x) >> 24)
+#define GSC_IN_BASE_ADDR_PINGPONG(x)	((x) << 16)
+#define GSC_IN_BASE_ADDR_MASK		(0xff << 0)
+
+/* G-Scaler output y address mask */
+#define GSC_OUT_BASE_ADDR_Y_MASK	0x10C
+/* G-Scaler output y base address */
+#define GSC_OUT_BASE_ADDR_Y(n)		(0x110 + (n) * 0x4)
+
+/* G-Scaler output cb address mask */
+#define GSC_OUT_BASE_ADDR_CB_MASK	0x15C
+/* G-Scaler output cb base address */
+#define GSC_OUT_BASE_ADDR_CB(n)		(0x160 + (n) * 0x4)
+
+/* G-Scaler output cr address mask */
+#define GSC_OUT_BASE_ADDR_CR_MASK	0x1AC
+/* G-Scaler output cr base address */
+#define GSC_OUT_BASE_ADDR_CR(n)		(0x1B0 + (n) * 0x4)
+
+/* G-Scaler output address mask */
+#define GSC_OUT_CURR_ADDR_INDEX		(0xf << 24)
+#define GSC_OUT_CURR_GET_INDEX(x)	((x) >> 24)
+#define GSC_OUT_BASE_ADDR_PINGPONG(x)	((x) << 16)
+#define GSC_OUT_BASE_ADDR_MASK		(0xffff << 0)
+
+/* G-Scaler horizontal scaling filter */
+#define GSC_HCOEF(n, s, x)	(0x300 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
+
+/* G-Scaler vertical scaling filter */
+#define GSC_VCOEF(n, s, x)	(0x200 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
+
+/* G-Scaler BUS control */
+#define GSC_BUSCON			0xA78
+#define GSC_BUSCON_INT_TIME_MASK	(1 << 8)
+#define GSC_BUSCON_INT_DATA_TRANS	(0 << 8)
+#define GSC_BUSCON_INT_AXI_RESPONSE	(1 << 8)
+#define GSC_BUSCON_AWCACHE(x)		((x) << 4)
+#define GSC_BUSCON_ARCACHE(x)		((x) << 0)
+
+/* G-Scaler V position */
+#define GSC_VPOSITION			0xA7C
+#define GSC_VPOS_F(x)			((x) << 0)
+
+
+/* G-Scaler clock initial count */
+#define GSC_CLK_INIT_COUNT		0xC00
+#define GSC_CLK_GATE_MODE_INIT_CNT(x)	((x) << 0)
+
+/* G-Scaler clock snoop count */
+#define GSC_CLK_SNOOP_COUNT		0xC04
+#define GSC_CLK_GATE_MODE_SNOOP_CNT(x)	((x) << 0)
+
+/* SYSCON. GSCBLK_CFG */
+#define SYSREG_GSCBLK_CFG1		(S3C_VA_SYS + 0x0224)
+#define GSC_BLK_DISP1WB_DEST(x)		((x) << 10)
+#define GSC_BLK_SW_RESET_WB_DEST(x)	(1 << (18 + (x)))
+#define GSC_BLK_PXLASYNC_LO_MASK_WB(x)	(0 << (14 + (x)))
+#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x)	(1 << (2 * (x)))
+#define SYSREG_GSCBLK_CFG2		(S3C_VA_SYS + 0x2000)
+#define PXLASYNC_LO_MASK_CAMIF_GSCL(x)	(1 << (x))
+
+#endif /* EXYNOS_REGS_GSC_H_ */
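
The _MASK/value pairs in this header follow the usual read-modify-write convention: clear a field with its mask, then OR in the new setting before writing the register back. An illustrative sketch (the 'regs' ioremapped base and the chosen format/rotation are assumptions):

#include <linux/io.h>

static void gsc_set_input(void __iomem *regs)
{
	u32 cfg = readl(regs + GSC_IN_CON);

	/* Clear the format and rotation fields, then set new values. */
	cfg &= ~(GSC_IN_FORMAT_MASK | GSC_IN_ROT_MASK);
	cfg |= GSC_IN_YUV420_2P | GSC_IN_ROT_90;
	writel(cfg, regs + GSC_IN_CON);
}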
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 9cc7c5e9718c..ef1b3eb3ba6e 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -176,6 +176,11 @@
 #define HDMI_PHY_CMU			HDMI_CTRL_BASE(0x007C)
 #define HDMI_CORE_RSTOUT		HDMI_CTRL_BASE(0x0080)
 
+/* PHY Control bit definition */
+
+/* HDMI_PHY_CON_0 */
+#define HDMI_PHY_POWER_OFF_EN		(1 << 0)
+
 /* Video related registers */
 #define HDMI_YMAX			HDMI_CORE_BASE(0x0060)
 #define HDMI_YMIN			HDMI_CORE_BASE(0x0064)
@@ -298,14 +303,14 @@
 #define HDMI_AVI_HEADER1		HDMI_CORE_BASE(0x0714)
 #define HDMI_AVI_HEADER2		HDMI_CORE_BASE(0x0718)
 #define HDMI_AVI_CHECK_SUM		HDMI_CORE_BASE(0x071C)
-#define HDMI_AVI_BYTE(n)		HDMI_CORE_BASE(0x0720 + 4 * (n))
+#define HDMI_AVI_BYTE(n)		HDMI_CORE_BASE(0x0720 + 4 * (n-1))
 
 #define HDMI_AUI_CON			HDMI_CORE_BASE(0x0800)
 #define HDMI_AUI_HEADER0		HDMI_CORE_BASE(0x0810)
 #define HDMI_AUI_HEADER1		HDMI_CORE_BASE(0x0814)
 #define HDMI_AUI_HEADER2		HDMI_CORE_BASE(0x0818)
 #define HDMI_AUI_CHECK_SUM		HDMI_CORE_BASE(0x081C)
-#define HDMI_AUI_BYTE(n)		HDMI_CORE_BASE(0x0820 + 4 * (n))
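+/* Audio infoframe data bytes are likewise numbered from 1 */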
+#define HDMI_AUI_BYTE(n)		HDMI_CORE_BASE(0x0820 + 4 * (n-1))
 
 #define HDMI_MPG_CON			HDMI_CORE_BASE(0x0900)
 #define HDMI_MPG_CHECK_SUM		HDMI_CORE_BASE(0x091C)
@@ -338,6 +343,19 @@
 #define HDMI_AN_SEED_2			HDMI_CORE_BASE(0x0E60)
 #define HDMI_AN_SEED_3			HDMI_CORE_BASE(0x0E64)
 
+/* AVI bit definition */
+#define HDMI_AVI_CON_DO_NOT_TRANSMIT	(0 << 1)
+#define HDMI_AVI_CON_EVERY_VSYNC	(1 << 1)
+
+#define AVI_ACTIVE_FORMAT_VALID	(1 << 4)
+#define AVI_UNDERSCANNED_DISPLAY_VALID	(1 << 1)
+
+/* AUI bit definition */
+#define HDMI_AUI_CON_NO_TRAN		(0 << 0)
+
+/* VSI bit definition */
+#define HDMI_VSI_CON_DO_NOT_TRANSMIT	(0 << 0)
+
 /* HDCP related registers */
 #define HDMI_HDCP_SHA1(n)		HDMI_CORE_BASE(0x7000 + 4 * (n))
 #define HDMI_HDCP_KSV_LIST(n)		HDMI_CORE_BASE(0x7050 + 4 * (n))
diff --git a/drivers/gpu/drm/exynos/regs-rotator.h b/drivers/gpu/drm/exynos/regs-rotator.h
new file mode 100644
index 000000000000..a09ac6e180da
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-rotator.h
@@ -0,0 +1,73 @@
+/* drivers/gpu/drm/exynos/regs-rotator.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * Register definition file for Samsung Rotator Interface (Rotator) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef EXYNOS_REGS_ROTATOR_H
+#define EXYNOS_REGS_ROTATOR_H
+
+/* Configuration */
+#define ROT_CONFIG			0x00
+#define ROT_CONFIG_IRQ			(3 << 8)
+
+/* Image Control */
+#define ROT_CONTROL			0x10
+#define ROT_CONTROL_PATTERN_WRITE	(1 << 16)
+#define ROT_CONTROL_FMT_YCBCR420_2P	(1 << 8)
+#define ROT_CONTROL_FMT_RGB888		(6 << 8)
+#define ROT_CONTROL_FMT_MASK		(7 << 8)
+#define ROT_CONTROL_FLIP_VERTICAL	(2 << 6)
+#define ROT_CONTROL_FLIP_HORIZONTAL	(3 << 6)
+#define ROT_CONTROL_FLIP_MASK		(3 << 6)
+#define ROT_CONTROL_ROT_90		(1 << 4)
+#define ROT_CONTROL_ROT_180		(2 << 4)
+#define ROT_CONTROL_ROT_270		(3 << 4)
+#define ROT_CONTROL_ROT_MASK		(3 << 4)
+#define ROT_CONTROL_START		(1 << 0)
+
+/* Status */
+#define ROT_STATUS			0x20
+#define ROT_STATUS_IRQ_PENDING(x)	(1 << (x))
+#define ROT_STATUS_IRQ(x)		(((x) >> 8) & 0x3)
+#define ROT_STATUS_IRQ_VAL_COMPLETE	1
+#define ROT_STATUS_IRQ_VAL_ILLEGAL	2
+
+/* Buffer Address */
+#define ROT_SRC_BUF_ADDR(n)		(0x30 + ((n) << 2))
+#define ROT_DST_BUF_ADDR(n)		(0x50 + ((n) << 2))
+
+/* Buffer Size */
+#define ROT_SRC_BUF_SIZE		0x3c
+#define ROT_DST_BUF_SIZE		0x5c
+#define ROT_SET_BUF_SIZE_H(x)		((x) << 16)
+#define ROT_SET_BUF_SIZE_W(x)		((x) << 0)
+#define ROT_GET_BUF_SIZE_H(x)		((x) >> 16)
+#define ROT_GET_BUF_SIZE_W(x)		((x) & 0xffff)
+
+/* Crop Position */
+#define ROT_SRC_CROP_POS		0x40
+#define ROT_DST_CROP_POS		0x60
+#define ROT_CROP_POS_Y(x)		((x) << 16)
+#define ROT_CROP_POS_X(x)		((x) << 0)
+
+/* Source Crop Size */
+#define ROT_SRC_CROP_SIZE		0x44
+#define ROT_SRC_CROP_SIZE_H(x)		((x) << 16)
+#define ROT_SRC_CROP_SIZE_W(x)		((x) << 0)
+
+/* Round to nearest aligned value */
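+/* e.g. ROT_ALIGN(x, 4, 0xfffffff0) rounds x to the nearest multiple of 16 */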
+#define ROT_ALIGN(x, align, mask)	(((x) + (1 << ((align) - 1))) & (mask))
+/* Minimum limit value */
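+/* e.g. ROT_MIN(5, 0xfffffff0) = 16: the minimum rounded up to the alignment */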
+#define ROT_MIN(min, mask)		(((min) + ~(mask)) & (mask))
+/* Maximum limit value */
+#define ROT_MAX(max, mask)		((max) & (mask))
+
+#endif /* EXYNOS_REGS_ROTATOR_H */
+
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 1ceca3d13b65..23e14e93991f 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -523,7 +523,7 @@ void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
 
 		dev_priv->force_audio_property = prop;
 	}
-	drm_connector_attach_property(connector, prop, 0);
+	drm_object_attach_property(&connector->base, prop, 0);
 }
 
 
@@ -553,7 +553,7 @@ void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
 		dev_priv->broadcast_rgb_property = prop;
 	}
 
-	drm_connector_attach_property(connector, prop, 0);
+	drm_object_attach_property(&connector->base, prop, 0);
 }
 
 /* Cedarview */
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index e3a3978cf320..51044cc55cf2 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1650,7 +1650,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
 	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
 	int ret;
 
-	ret = drm_connector_property_set_value(connector, property, val);
+	ret = drm_object_property_set_value(&connector->base, property, val);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 7272a461edfe..e223b500022e 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -185,14 +185,14 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
 			return -1;
 		}
 
-		if (drm_connector_property_get_value(connector,
+		if (drm_object_property_get_value(&connector->base,
 							property, &curValue))
 			return -1;
 
 		if (curValue == value)
 			return 0;
 
-		if (drm_connector_property_set_value(connector,
+		if (drm_object_property_set_value(&connector->base,
 							property, value))
 			return -1;
 
@@ -341,7 +341,7 @@ void cdv_hdmi_init(struct drm_device *dev,
 	connector->interlace_allowed = false;
 	connector->doublescan_allowed = false;
 
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				      dev->mode_config.scaling_mode_property,
 				      DRM_MODE_SCALE_FULLSCREEN);
 
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index b362dd39bf5a..d81dbc3368f0 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -479,7 +479,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
 			return -1;
 		}
 
-		if (drm_connector_property_get_value(connector,
+		if (drm_object_property_get_value(&connector->base,
 						     property,
 						     &curValue))
 			return -1;
@@ -487,7 +487,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
 		if (curValue == value)
 			return 0;
 
-		if (drm_connector_property_set_value(connector,
+		if (drm_object_property_set_value(&connector->base,
 							property,
 							value))
 			return -1;
@@ -502,7 +502,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
 				return -1;
 		}
 	} else if (!strcmp(property->name, "backlight") && encoder) {
-		if (drm_connector_property_set_value(connector,
+		if (drm_object_property_set_value(&connector->base,
 							property,
 							value))
 			return -1;
@@ -671,10 +671,10 @@ void cdv_intel_lvds_init(struct drm_device *dev,
 	connector->doublescan_allowed = false;
 
 	/*Attach connector properties*/
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				      dev->mode_config.scaling_mode_property,
 				      DRM_MODE_SCALE_FULLSCREEN);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				      dev_priv->backlight_property,
 				      BRIGHTNESS_MAX_LEVEL);
 
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 32dba2ab53e1..2d4ab48f07a2 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -265,13 +265,13 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
 			goto set_prop_error;
 		}
 
-		if (drm_connector_property_get_value(connector, property, &val))
+		if (drm_object_property_get_value(&connector->base, property, &val))
 			goto set_prop_error;
 
 		if (val == value)
 			goto set_prop_done;
 
-		if (drm_connector_property_set_value(connector,
+		if (drm_object_property_set_value(&connector->base,
 							property, value))
 			goto set_prop_error;
 
@@ -296,7 +296,7 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
 			}
 		}
 	} else if (!strcmp(property->name, "backlight") && encoder) {
-		if (drm_connector_property_set_value(connector, property,
+		if (drm_object_property_set_value(&connector->base, property,
 									value))
 			goto set_prop_error;
 		else
@@ -506,7 +506,7 @@ void mdfld_dsi_output_init(struct drm_device *dev,
 
 	dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
 
-	if (!dev || ((pipe != 0) && (pipe != 2))) {
+	if (pipe != 0 && pipe != 2) {
 		DRM_ERROR("Invalid parameter\n");
 		return;
 	}
@@ -572,10 +572,10 @@ void mdfld_dsi_output_init(struct drm_device *dev,
 	connector->doublescan_allowed = false;
 
 	/*attach properties*/
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				dev->mode_config.scaling_mode_property,
 				DRM_MODE_SCALE_FULLSCREEN);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				dev_priv->backlight_property,
 				MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
 
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index dec6a9aea3c6..74485dc43945 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -820,7 +820,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
 	REG_WRITE(map->pos, 0);
 
 	if (psb_intel_encoder)
-		drm_connector_property_get_value(connector,
+		drm_object_property_get_value(&connector->base,
 			dev->mode_config.scaling_mode_property, &scalingType);
 
 	if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
diff --git a/drivers/gpu/drm/gma500/oaktrail.h b/drivers/gpu/drm/gma500/oaktrail.h
index f2f9f38a5362..30adbbe23024 100644
--- a/drivers/gpu/drm/gma500/oaktrail.h
+++ b/drivers/gpu/drm/gma500/oaktrail.h
@@ -249,3 +249,9 @@ extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev);
 extern void oaktrail_hdmi_save(struct drm_device *dev);
 extern void oaktrail_hdmi_restore(struct drm_device *dev);
 extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
+extern int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+						struct drm_display_mode *adjusted_mode, int x, int y,
+						struct drm_framebuffer *old_fb);
+extern void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode);
+
+
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index cdafd2acc72f..3071526bc3c1 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -168,6 +168,11 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
 	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	u32 temp;
 
+	if (pipe == 1) {
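+		/* Pipe B is wired to the HDMI encoder on Oaktrail */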
+		oaktrail_crtc_hdmi_dpms(crtc, mode);
+		return;
+	}
+
 	if (!gma_power_begin(dev, true))
 		return;
 
@@ -302,6 +307,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 	uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
 	struct drm_connector *connector;
 
+	if (pipe == 1)
+		return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+
 	if (!gma_power_begin(dev, true))
 		return 0;
 
@@ -343,7 +351,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 		  (mode->crtc_vdisplay - 1));
 
 	if (psb_intel_encoder)
-		drm_connector_property_get_value(connector,
+		drm_object_property_get_value(&connector->base,
 			dev->mode_config.scaling_mode_property, &scalingType);
 
 	if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 010b820744a5..08747fd7105c 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -544,7 +544,7 @@ const struct psb_ops oaktrail_chip_ops = {
 	.accel_2d = 1,
 	.pipes = 2,
 	.crtcs = 2,
-	.hdmi_mask = (1 << 0),
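+	/* HDMI is on pipe B, hence bit 1 rather than bit 0 */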
+	.hdmi_mask = (1 << 1),
 	.lvds_mask = (1 << 0),
 	.cursor_needs_phys = 0,
 	.sgx_offset = MRST_SGX_OFFSET,
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 69e51e903f35..f036f1fc161e 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -155,6 +155,345 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
 	HDMI_READ(HDMI_HCR);
 }
 
+static void wait_for_vblank(struct drm_device *dev)
+{
+	/* Crude wait: sleep 20ms, i.e. one full frame at 50Hz. */
+	mdelay(20);
+}
+
+static unsigned int htotal_calculate(struct drm_display_mode *mode)
+{
+	u32 htotal, new_crtc_htotal;
+
+	htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
+
+	/*
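+	 * Rescale crtc_htotal from the mode clock (in kHz) to what appears
+	 * to be a fixed 200 MHz reference; both sample values below agree: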
+	 * 1024 x 768  new_crtc_htotal = 0x1024;
+	 * 1280 x 1024 new_crtc_htotal = 0x0c34;
+	 */
+	new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
+
+	DRM_DEBUG_KMS("new crtc htotal 0x%4x\n", new_crtc_htotal);
+	return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
+}
+
+static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target,
+				int refclk, struct oaktrail_hdmi_clock *best_clock)
+{
+	int np_min, np_max, nr_min, nr_max;
+	int np, nr, nf;
+
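+	/*
+	 * Empirically, dot clock = refclk * nf / (nr * np * 10); choose
+	 * dividers that keep the VCO inside its allowed range.
+	 */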
+	np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10);
+	np_max = oaktrail_hdmi_limit.vco.max / (target * 10);
+	if (np_min < oaktrail_hdmi_limit.np.min)
+		np_min = oaktrail_hdmi_limit.np.min;
+	if (np_max > oaktrail_hdmi_limit.np.max)
+		np_max = oaktrail_hdmi_limit.np.max;
+
+	nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
+	nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
+	if (nr_min < oaktrail_hdmi_limit.nr.min)
+		nr_min = oaktrail_hdmi_limit.nr.min;
+	if (nr_max > oaktrail_hdmi_limit.nr.max)
+		nr_max = oaktrail_hdmi_limit.nr.max;
+
+	np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
+	nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
+	nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
+	DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
+
+	/*
+	 * 1024 x 768  np = 1; nr = 0x26; nf = 0x0fd8000;
+	 * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
+	 */
+	best_clock->np = np;
+	best_clock->nr = nr - 1;
+	best_clock->nf = (nf << 14);
+}
+
+static void scu_busy_loop(void __iomem *scu_base)
+{
+	u32 status = 0;
+	u32 loop_count = 0;
+
+	status = readl(scu_base + 0x04);
+	while (status & 1) {
+		udelay(1); /* SCU processing takes only a few microseconds */
+		status = readl(scu_base + 0x04);
+		loop_count++;
+		/* bail out if the SCU never clears the busy bit */
+		if (loop_count > 1000) {
+			DRM_DEBUG_KMS("SCU IPC timed out\n");
+			return;
+		}
+	}
+}
+
+/*
+ *	You don't want to know, you really really don't want to know....
+ *
+ *	This is magic. However it's safe magic because of the way the platform
+ *	works and it is necessary magic.
+ */
+static void oaktrail_hdmi_reset(struct drm_device *dev)
+{
+	void __iomem *base;
+	unsigned long scu_ipc_mmio = 0xff11c000UL;
+	int scu_len = 1024;
+
+	base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
+	if (base == NULL) {
+		DRM_ERROR("failed to map scu mmio\n");
+		return;
+	}
+
+	/* scu ipc: assert hdmi controller reset */
+	writel(0xff11d118, base + 0x0c);
+	writel(0x7fffffdf, base + 0x80);
+	writel(0x42005, base + 0x0);
+	scu_busy_loop(base);
+
+	/* scu ipc: de-assert hdmi controller reset */
+	writel(0xff11d118, base + 0x0c);
+	writel(0x7fffffff, base + 0x80);
+	writel(0x42005, base + 0x0);
+	scu_busy_loop(base);
+
+	iounmap(base);
+}
+
+int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
+			    struct drm_display_mode *mode,
+			    struct drm_display_mode *adjusted_mode,
+			    int x, int y,
+			    struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+	int pipe = 1;
+	int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+	int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+	int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+	int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+	int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+	int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+	int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+	int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+	int refclk;
+	struct oaktrail_hdmi_clock clock;
+	u32 dspcntr, pipeconf, dpll, temp;
+	int dspcntr_reg = DSPBCNTR;
+
+	if (!gma_power_begin(dev, true))
+		return 0;
+
+	/* Disable the VGA plane that we never use */
+	REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+	/* Disable dpll if necessary */
+	dpll = REG_READ(DPLL_CTRL);
+	if ((dpll & DPLL_PWRDN) == 0) {
+		REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
+		REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
+		REG_WRITE(DPLL_STATUS, 0x1);
+	}
+	udelay(150);
+
+	/* Reset controller */
+	oaktrail_hdmi_reset(dev);
+
+	/* program and enable dpll */
+	refclk = 25000;
+	oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
+
+	/* Set the DPLL */
+	dpll = REG_READ(DPLL_CTRL);
+	dpll &= ~DPLL_PDIV_MASK;
+	dpll &= ~(DPLL_PWRDN | DPLL_RESET);
+	REG_WRITE(DPLL_CTRL, 0x00000008);
+	REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
+	REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
+	REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
+	REG_WRITE(DPLL_UPDATE, 0x80000000);
+	REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
+	udelay(150);
+
+	/* configure HDMI */
+	HDMI_WRITE(0x1004, 0x1fd);
+	HDMI_WRITE(0x2000, 0x1);
+	HDMI_WRITE(0x2008, 0x0);
+	HDMI_WRITE(0x3130, 0x8);
+	HDMI_WRITE(0x101c, 0x1800810);
+
+	temp = htotal_calculate(adjusted_mode);
+	REG_WRITE(htot_reg, temp);
+	REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+	REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+	REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+	REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+	REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+	REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) |  (mode->crtc_vdisplay - 1));
+
+	REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
+	REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+	REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+	REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+	REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+	REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+	REG_WRITE(PCH_PIPEBSRC, ((mode->crtc_hdisplay - 1) << 16) |  (mode->crtc_vdisplay - 1));
+
+	temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
+	HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) |  temp);
+
+	REG_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+	REG_WRITE(dsppos_reg, 0);
+
+	/* Flush the plane changes */
+	{
+		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+	}
+
+	/* Set up the display plane register */
+	dspcntr = REG_READ(dspcntr_reg);
+	dspcntr |= DISPPLANE_GAMMA_ENABLE;
+	dspcntr |= DISPPLANE_SEL_PIPE_B;
+	dspcntr |= DISPLAY_PLANE_ENABLE;
+
+	/* setup pipeconf */
+	pipeconf = REG_READ(pipeconf_reg);
+	pipeconf |= PIPEACONF_ENABLE;
+
+	REG_WRITE(pipeconf_reg, pipeconf);
+	REG_READ(pipeconf_reg);
+
+	REG_WRITE(PCH_PIPEBCONF, pipeconf);
+	REG_READ(PCH_PIPEBCONF);
+	wait_for_vblank(dev);
+
+	REG_WRITE(dspcntr_reg, dspcntr);
+	wait_for_vblank(dev);
+
+	gma_power_end(dev);
+
+	return 0;
+}
+
+void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	u32 temp;
+
+	DRM_DEBUG_KMS("%s %d\n", __func__, mode);
+
+	switch (mode) {
+	case DRM_MODE_DPMS_OFF:
+		REG_WRITE(VGACNTRL, 0x80000000);
+
+		/* Disable plane */
+		temp = REG_READ(DSPBCNTR);
+		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+			REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
+			REG_READ(DSPBCNTR);
+			/* Flush the plane changes */
+			REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+			REG_READ(DSPBSURF);
+		}
+
+		/* Disable pipe B */
+		temp = REG_READ(PIPEBCONF);
+		if ((temp & PIPEACONF_ENABLE) != 0) {
+			REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+			REG_READ(PIPEBCONF);
+		}
+
+		/* Disable LNW Pipes, etc */
+		temp = REG_READ(PCH_PIPEBCONF);
+		if ((temp & PIPEACONF_ENABLE) != 0) {
+			REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+			REG_READ(PCH_PIPEBCONF);
+		}
+
+		/* wait for pipe off */
+		udelay(150);
+
+		/* Disable dpll */
+		temp = REG_READ(DPLL_CTRL);
+		if ((temp & DPLL_PWRDN) == 0) {
+			REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
+			REG_WRITE(DPLL_STATUS, 0x1);
+		}
+
+		/* wait for dpll off */
+		udelay(150);
+
+		break;
+	case DRM_MODE_DPMS_ON:
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+		/* Enable dpll */
+		temp = REG_READ(DPLL_CTRL);
+		if ((temp & DPLL_PWRDN) != 0) {
+			REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
+			temp = REG_READ(DPLL_CLK_ENABLE);
+			REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
+			REG_READ(DPLL_CLK_ENABLE);
+		}
+		/* wait for dpll warm up */
+		udelay(150);
+
+		/* Enable pipe B */
+		temp = REG_READ(PIPEBCONF);
+		if ((temp & PIPEACONF_ENABLE) == 0) {
+			REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
+			REG_READ(PIPEBCONF);
+		}
+
+		/* Enable LNW Pipe B */
+		temp = REG_READ(PCH_PIPEBCONF);
+		if ((temp & PIPEACONF_ENABLE) == 0) {
+			REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
+			REG_READ(PCH_PIPEBCONF);
+		}
+
+		wait_for_vblank(dev);
+
+		/* Enable plane */
+		temp = REG_READ(DSPBCNTR);
+		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+			REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
+			/* Flush the plane changes */
+			REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+			REG_READ(DSPBSURF);
+		}
+
+		psb_intel_crtc_load_lut(crtc);
+	}
+
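+	/* Display arbitration and FIFO watermark setup; platform magic values */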
+	/* DSPARB */
+	REG_WRITE(DSPARB, 0x00003fbf);
+
+	/* FW1 */
+	REG_WRITE(0x70034, 0x3f880a0a);
+
+	/* FW2 */
+	REG_WRITE(0x70038, 0x0b060808);
+
+	/* FW4 */
+	REG_WRITE(0x70050, 0x08030404);
+
+	/* FW5 */
+	REG_WRITE(0x70054, 0x04040404);
+
+	/* LNC Chicken Bits - Squawk! */
+	REG_WRITE(0x70400, 0x4000);
+
+	return;
+}
+
 static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
 {
 	static int dpms_mode = -1;
@@ -233,13 +572,15 @@ static const unsigned char raw_edid[] = {
 
 static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
 {
-	struct drm_device *dev = connector->dev;
-	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct i2c_adapter *i2c_adap;
 	struct edid *edid;
-	struct drm_display_mode *mode, *t;
-	int i = 0, ret = 0;
+	int ret = 0;
 
+	/*
+	 *	FIXME: We need to figure this lot out. In theory we can
+	 *	read the EDID somehow but I've yet to find working reference
+	 *	code.
+	 */
 	i2c_adap = i2c_get_adapter(3);
 	if (i2c_adap == NULL) {
 		DRM_ERROR("No ddc adapter available!\n");
@@ -253,17 +594,7 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
 		drm_mode_connector_update_edid_property(connector, edid);
 		ret = drm_add_edid_modes(connector, edid);
 	}
-
-	/*
-	 * prune modes that require frame buffer bigger than stolen mem
-	 */
-	list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
-		if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) {
-			i++;
-			drm_mode_remove(connector, mode);
-		}
-	}
-	return ret - i;
+	return ret;
 }
 
 static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder,
@@ -349,6 +680,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
 	connector->interlace_allowed = false;
 	connector->doublescan_allowed = false;
 	drm_sysfs_connector_add(connector);
+	dev_info(dev->dev, "HDMI initialised.\n");
 
 	return;
 
@@ -403,6 +735,9 @@ void oaktrail_hdmi_setup(struct drm_device *dev)
 
 	dev_priv->hdmi_priv = hdmi_dev;
 	oaktrail_hdmi_audio_disable(dev);
+
+	dev_info(dev->dev, "HDMI hardware present.\n");
+
 	return;
 
 free:
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 558c77fb55ec..325013a9c48c 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -133,8 +133,8 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
 		return;
 	}
 
-	drm_connector_property_get_value(
-		connector,
+	drm_object_property_get_value(
+		&connector->base,
 		dev->mode_config.scaling_mode_property,
 		&v);
 
@@ -363,10 +363,10 @@ void oaktrail_lvds_init(struct drm_device *dev,
 	connector->interlace_allowed = false;
 	connector->doublescan_allowed = false;
 
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 					dev->mode_config.scaling_mode_property,
 					DRM_MODE_SCALE_FULLSCREEN);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 					dev_priv->backlight_property,
 					BRIGHTNESS_MAX_LEVEL);
 
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 2a4c3a9e33e3..9fa5fa2e6192 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -603,7 +603,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
 			goto set_prop_error;
 		}
 
-		if (drm_connector_property_get_value(connector,
+		if (drm_object_property_get_value(&connector->base,
 						     property,
 						     &curval))
 			goto set_prop_error;
@@ -611,7 +611,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
 		if (curval == value)
 			goto set_prop_done;
 
-		if (drm_connector_property_set_value(connector,
+		if (drm_object_property_set_value(&connector->base,
 							property,
 							value))
 			goto set_prop_error;
@@ -626,7 +626,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
 				goto set_prop_error;
 		}
 	} else if (!strcmp(property->name, "backlight")) {
-		if (drm_connector_property_set_value(connector,
+		if (drm_object_property_set_value(&connector->base,
 							property,
 							value))
 			goto set_prop_error;
@@ -746,10 +746,10 @@ void psb_intel_lvds_init(struct drm_device *dev,
 	connector->doublescan_allowed = false;
 
 	/*Attach connector properties*/
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				      dev->mode_config.scaling_mode_property,
 				      DRM_MODE_SCALE_FULLSCREEN);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				      dev_priv->backlight_property,
 				      BRIGHTNESS_MAX_LEVEL);
 
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index fc9292705dbf..a4cc777ab7a6 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1694,7 +1694,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
 	uint8_t cmd;
 	int ret;
 
-	ret = drm_connector_property_set_value(connector, property, val);
+	ret = drm_object_property_set_value(&connector->base, property, val);
 	if (ret)
 		return ret;
 
@@ -1749,7 +1749,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
 	} else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
 		temp_value = val;
 		if (psb_intel_sdvo_connector->left == property) {
-			drm_connector_property_set_value(connector,
+			drm_object_property_set_value(&connector->base,
 							 psb_intel_sdvo_connector->right, val);
 			if (psb_intel_sdvo_connector->left_margin == temp_value)
 				return 0;
@@ -1761,7 +1761,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
 			cmd = SDVO_CMD_SET_OVERSCAN_H;
 			goto set_value;
 		} else if (psb_intel_sdvo_connector->right == property) {
-			drm_connector_property_set_value(connector,
+			drm_object_property_set_value(&connector->base,
 							 psb_intel_sdvo_connector->left, val);
 			if (psb_intel_sdvo_connector->right_margin == temp_value)
 				return 0;
@@ -1773,7 +1773,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
 			cmd = SDVO_CMD_SET_OVERSCAN_H;
 			goto set_value;
 		} else if (psb_intel_sdvo_connector->top == property) {
-			drm_connector_property_set_value(connector,
+			drm_object_property_set_value(&connector->base,
 							 psb_intel_sdvo_connector->bottom, val);
 			if (psb_intel_sdvo_connector->top_margin == temp_value)
 				return 0;
@@ -1785,7 +1785,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
 			cmd = SDVO_CMD_SET_OVERSCAN_V;
 			goto set_value;
 		} else if (psb_intel_sdvo_connector->bottom == property) {
-			drm_connector_property_set_value(connector,
+			drm_object_property_set_value(&connector->base,
 							 psb_intel_sdvo_connector->top, val);
 			if (psb_intel_sdvo_connector->bottom_margin == temp_value)
 				return 0;
@@ -2286,7 +2286,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
 				i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
 
 	psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
-	drm_connector_attach_property(&psb_intel_sdvo_connector->base.base,
+	drm_object_attach_property(&psb_intel_sdvo_connector->base.base.base,
 				      psb_intel_sdvo_connector->tv_format, 0);
 	return true;
 
@@ -2302,7 +2302,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
 		psb_intel_sdvo_connector->name = \
 			drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
 		if (!psb_intel_sdvo_connector->name) return false; \
-		drm_connector_attach_property(connector, \
+		drm_object_attach_property(&connector->base, \
 					      psb_intel_sdvo_connector->name, \
 					      psb_intel_sdvo_connector->cur_##name); \
 		DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2339,7 +2339,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
 		if (!psb_intel_sdvo_connector->left)
 			return false;
 
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      psb_intel_sdvo_connector->left,
 					      psb_intel_sdvo_connector->left_margin);
 
@@ -2348,7 +2348,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
 		if (!psb_intel_sdvo_connector->right)
 			return false;
 
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      psb_intel_sdvo_connector->right,
 					      psb_intel_sdvo_connector->right_margin);
 		DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2375,7 +2375,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
 		if (!psb_intel_sdvo_connector->top)
 			return false;
 
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      psb_intel_sdvo_connector->top,
 					      psb_intel_sdvo_connector->top_margin);
 
@@ -2384,7 +2384,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
 		if (!psb_intel_sdvo_connector->bottom)
 			return false;
 
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      psb_intel_sdvo_connector->bottom,
 					      psb_intel_sdvo_connector->bottom_margin);
 		DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2416,7 +2416,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
 		if (!psb_intel_sdvo_connector->dot_crawl)
 			return false;
 
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      psb_intel_sdvo_connector->dot_crawl,
 					      psb_intel_sdvo_connector->cur_dot_crawl);
 		DRM_DEBUG_KMS("dot crawl: current %d\n", response);
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 599099fe76e3..b865d0728e28 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -214,7 +214,7 @@ static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encod
 	else
 		priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
 
-	drm_connector_property_set_value(connector,
+	drm_object_property_set_value(&connector->base,
 			encoder->dev->mode_config.tv_subconnector_property,
 							priv->subconnector);
 
@@ -254,23 +254,23 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
 
 	priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
 
-	drm_connector_attach_property(connector, conf->tv_select_subconnector_property,
+	drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property,
 				      priv->select_subconnector);
-	drm_connector_attach_property(connector, conf->tv_subconnector_property,
+	drm_object_attach_property(&connector->base, conf->tv_subconnector_property,
 				      priv->subconnector);
-	drm_connector_attach_property(connector, conf->tv_left_margin_property,
+	drm_object_attach_property(&connector->base, conf->tv_left_margin_property,
 				      priv->hmargin);
-	drm_connector_attach_property(connector, conf->tv_bottom_margin_property,
+	drm_object_attach_property(&connector->base, conf->tv_bottom_margin_property,
 				      priv->vmargin);
-	drm_connector_attach_property(connector, conf->tv_mode_property,
+	drm_object_attach_property(&connector->base, conf->tv_mode_property,
 				      priv->norm);
-	drm_connector_attach_property(connector, conf->tv_brightness_property,
+	drm_object_attach_property(&connector->base, conf->tv_brightness_property,
 				      priv->brightness);
-	drm_connector_attach_property(connector, conf->tv_contrast_property,
+	drm_object_attach_property(&connector->base, conf->tv_contrast_property,
 				      priv->contrast);
-	drm_connector_attach_property(connector, conf->tv_flicker_reduction_property,
+	drm_object_attach_property(&connector->base, conf->tv_flicker_reduction_property,
 				      priv->flicker);
-	drm_connector_attach_property(connector, priv->scale_property,
+	drm_object_attach_property(&connector->base, priv->scale_property,
 				      priv->scale);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index dde8b505bf7f..e6a11ca85eaf 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -317,7 +317,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
 				   pipe, plane);
 		} else {
-			if (!work->pending) {
+			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
 				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
 					   pipe, plane);
 			} else {
@@ -328,7 +328,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 				seq_printf(m, "Stall check enabled, ");
 			else
 				seq_printf(m, "Stall check waiting for page flip ioctl, ");
-			seq_printf(m, "%d prepares\n", work->pending);
+			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
 
 			if (work->old_fb_obj) {
 				struct drm_i915_gem_object *obj = work->old_fb_obj;
@@ -655,10 +655,12 @@ static void i915_ring_error_state(struct seq_file *m,
 	if (INTEL_INFO(dev)->gen >= 6) {
 		seq_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
 		seq_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
-		seq_printf(m, "  SYNC_0: 0x%08x\n",
-			   error->semaphore_mboxes[ring][0]);
-		seq_printf(m, "  SYNC_1: 0x%08x\n",
-			   error->semaphore_mboxes[ring][1]);
+		seq_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
+			   error->semaphore_mboxes[ring][0],
+			   error->semaphore_seqno[ring][0]);
+		seq_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
+			   error->semaphore_mboxes[ring][1],
+			   error->semaphore_seqno[ring][1]);
 	}
 	seq_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
 	seq_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
@@ -1068,7 +1070,7 @@ static int gen6_drpc_info(struct seq_file *m)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 rpmodectl1, gt_core_status, rcctl1;
+	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
 	unsigned forcewake_count;
 	int count=0, ret;
 
@@ -1097,6 +1099,9 @@ static int gen6_drpc_info(struct seq_file *m)
 	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
 	mutex_unlock(&dev->struct_mutex);
+	mutex_lock(&dev_priv->rps.hw_lock);
+	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	seq_printf(m, "Video Turbo Mode: %s\n",
 		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
@@ -1148,6 +1153,12 @@ static int gen6_drpc_info(struct seq_file *m)
 	seq_printf(m, "RC6++ residency since boot: %u\n",
 		   I915_READ(GEN6_GT_GFX_RC6pp));
 
+	seq_printf(m, "RC6   voltage: %dmV\n",
+		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
+	seq_printf(m, "RC6+  voltage: %dmV\n",
+		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
+	seq_printf(m, "RC6++ voltage: %dmV\n",
+		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
 	return 0;
 }
 
@@ -1273,7 +1284,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 		return 0;
 	}
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
 
@@ -1282,19 +1293,14 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 	for (gpu_freq = dev_priv->rps.min_delay;
 	     gpu_freq <= dev_priv->rps.max_delay;
 	     gpu_freq++) {
-		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
-		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
-			   GEN6_PCODE_READ_MIN_FREQ_TABLE);
-		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
-			      GEN6_PCODE_READY) == 0, 10)) {
-			DRM_ERROR("pcode read of freq table timed out\n");
-			continue;
-		}
-		ia_freq = I915_READ(GEN6_PCODE_DATA);
+		ia_freq = gpu_freq;
+		sandybridge_pcode_read(dev_priv,
+				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
+				       &ia_freq);
 		seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
 	}
 
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return 0;
 }
@@ -1398,15 +1404,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	if (ret)
 		return ret;
 
-	if (dev_priv->pwrctx) {
+	if (dev_priv->ips.pwrctx) {
 		seq_printf(m, "power context ");
-		describe_obj(m, dev_priv->pwrctx);
+		describe_obj(m, dev_priv->ips.pwrctx);
 		seq_printf(m, "\n");
 	}
 
-	if (dev_priv->renderctx) {
+	if (dev_priv->ips.renderctx) {
 		seq_printf(m, "render context ");
-		describe_obj(m, dev_priv->renderctx);
+		describe_obj(m, dev_priv->ips.renderctx);
 		seq_printf(m, "\n");
 	}
 
@@ -1711,13 +1717,13 @@ i915_max_freq_read(struct file *filp,
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
 
 	len = snprintf(buf, sizeof(buf),
 		       "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -1752,7 +1758,7 @@ i915_max_freq_write(struct file *filp,
 
 	DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
 
@@ -1762,7 +1768,7 @@ i915_max_freq_write(struct file *filp,
 	dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
 
 	gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return cnt;
 }
@@ -1787,13 +1793,13 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
 
 	len = snprintf(buf, sizeof(buf),
 		       "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -1826,7 +1832,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
 	DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
 
@@ -1836,7 +1842,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
 
 	gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return cnt;
 }
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 61ae104dca8c..8f63cd5de4b4 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -103,32 +103,6 @@ static void i915_write_hws_pga(struct drm_device *dev)
 	I915_WRITE(HWS_PGA, addr);
 }
 
-/**
- * Sets up the hardware status page for devices that need a physical address
- * in the register.
- */
-static int i915_init_phys_hws(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	/* Program Hardware Status Page */
-	dev_priv->status_page_dmah =
-		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
-
-	if (!dev_priv->status_page_dmah) {
-		DRM_ERROR("Can not allocate hardware status page\n");
-		return -ENOMEM;
-	}
-
-	memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
-		  0, PAGE_SIZE);
-
-	i915_write_hws_pga(dev);
-
-	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
-	return 0;
-}
-
 /**
  * Frees the hardware status page, whether it's a physical address or a virtual
  * address set up by the X Server.
@@ -167,7 +141,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
 
 	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
 	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-	ring->space = ring->head - (ring->tail + 8);
+	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
 	if (ring->space < 0)
 		ring->space += ring->size;
 
@@ -451,16 +425,16 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 
-	dev_priv->counter++;
-	if (dev_priv->counter > 0x7FFFFFFFUL)
-		dev_priv->counter = 0;
+	dev_priv->dri1.counter++;
+	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+		dev_priv->dri1.counter = 0;
 	if (master_priv->sarea_priv)
-		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
 
 	if (BEGIN_LP_RING(4) == 0) {
 		OUT_RING(MI_STORE_DWORD_INDEX);
 		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-		OUT_RING(dev_priv->counter);
+		OUT_RING(dev_priv->dri1.counter);
 		OUT_RING(0);
 		ADVANCE_LP_RING();
 	}
@@ -602,12 +576,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
 
 	ADVANCE_LP_RING();
 
-	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
+	master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
 
 	if (BEGIN_LP_RING(4) == 0) {
 		OUT_RING(MI_STORE_DWORD_INDEX);
 		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-		OUT_RING(dev_priv->counter);
+		OUT_RING(dev_priv->dri1.counter);
 		OUT_RING(0);
 		ADVANCE_LP_RING();
 	}
@@ -618,10 +592,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
 
 static int i915_quiescent(struct drm_device *dev)
 {
-	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
-
 	i915_kernel_lost_context(dev);
-	return intel_wait_ring_idle(ring);
+	return intel_ring_idle(LP_RING(dev->dev_private));
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -775,21 +747,21 @@ static int i915_emit_irq(struct drm_device * dev)
 
 	DRM_DEBUG_DRIVER("\n");
 
-	dev_priv->counter++;
-	if (dev_priv->counter > 0x7FFFFFFFUL)
-		dev_priv->counter = 1;
+	dev_priv->dri1.counter++;
+	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+		dev_priv->dri1.counter = 1;
 	if (master_priv->sarea_priv)
-		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
 
 	if (BEGIN_LP_RING(4) == 0) {
 		OUT_RING(MI_STORE_DWORD_INDEX);
 		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-		OUT_RING(dev_priv->counter);
+		OUT_RING(dev_priv->dri1.counter);
 		OUT_RING(MI_USER_INTERRUPT);
 		ADVANCE_LP_RING();
 	}
 
-	return dev_priv->counter;
+	return dev_priv->dri1.counter;
 }
 
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -820,7 +792,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
-			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+			  READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
 	}
 
 	return ret;
@@ -1014,6 +986,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
 		value = 1;
 		break;
+	case I915_PARAM_HAS_SECURE_BATCHES:
+		value = capable(CAP_SYS_ADMIN);
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
@@ -1068,7 +1043,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_hws_addr_t *hws = data;
-	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+	struct intel_ring_buffer *ring;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
@@ -1088,6 +1063,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 
 	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
 
+	ring = LP_RING(dev_priv);
 	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
 
 	dev_priv->dri1.gfx_hws_cpu_addr =
@@ -1326,6 +1302,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	intel_modeset_gem_init(dev);
 
+	INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
+
 	ret = drm_irq_install(dev);
 	if (ret)
 		goto cleanup_gem;
@@ -1491,19 +1469,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto free_priv;
 	}
 
-	ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
-	if (!ret) {
-		DRM_ERROR("failed to set up gmch\n");
-		ret = -EIO;
+	ret = i915_gem_gtt_init(dev);
+	if (ret)
 		goto put_bridge;
-	}
-
-	dev_priv->mm.gtt = intel_gtt_get();
-	if (!dev_priv->mm.gtt) {
-		DRM_ERROR("Failed to initialize GTT\n");
-		ret = -ENODEV;
-		goto put_gmch;
-	}
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_kick_out_firmware_fb(dev_priv);
@@ -1590,18 +1558,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	intel_setup_gmbus(dev);
 	intel_opregion_setup(dev);
 
-	/* Make sure the bios did its job and set up vital registers */
 	intel_setup_bios(dev);
 
 	i915_gem_load(dev);
 
-	/* Init HWS */
-	if (!I915_NEED_GFX_HWS(dev)) {
-		ret = i915_init_phys_hws(dev);
-		if (ret)
-			goto out_gem_unload;
-	}
-
 	/* On the 945G/GM, the chipset reports the MSI capability on the
 	 * integrated graphics even though the support isn't actually there
 	 * according to the published specs.  It doesn't appear to function
@@ -1621,6 +1581,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	spin_lock_init(&dev_priv->rps.lock);
 	spin_lock_init(&dev_priv->dpio_lock);
 
+	mutex_init(&dev_priv->rps.hw_lock);
+
 	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 		dev_priv->num_pipe = 3;
 	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
@@ -1678,7 +1640,7 @@ out_mtrrfree:
 out_rmmap:
 	pci_iounmap(dev->pdev, dev_priv->regs);
 put_gmch:
-	intel_gmch_remove();
+	i915_gem_gtt_fini(dev);
 put_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
 free_priv:
@@ -1721,6 +1683,7 @@ int i915_driver_unload(struct drm_device *dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		intel_fbdev_fini(dev);
 		intel_modeset_cleanup(dev);
+		cancel_work_sync(&dev_priv->console_resume_work);
 
 		/*
 		 * free the memory space allocated for the child device
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6770ee6084b4..530db83ef320 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -47,11 +47,11 @@ MODULE_PARM_DESC(modeset,
 unsigned int i915_fbpercrtc __always_unused = 0;
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
-int i915_panel_ignore_lid __read_mostly = 0;
+int i915_panel_ignore_lid __read_mostly = 1;
 module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
 MODULE_PARM_DESC(panel_ignore_lid,
-		"Override lid status (0=autodetect [default], 1=lid open, "
-		"-1=lid closed)");
+		"Override lid status (0=autodetect, 1=autodetect disabled [default], "
+		"-1=force lid closed, -2=force lid open)");
 
 unsigned int i915_powersave __read_mostly = 1;
 module_param_named(powersave, i915_powersave, int, 0600);
@@ -396,12 +396,6 @@ static const struct pci_device_id pciidlist[] = {		/* aka */
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
-#define INTEL_PCH_DEVICE_ID_MASK	0xff00
-#define INTEL_PCH_IBX_DEVICE_ID_TYPE	0x3b00
-#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
-#define INTEL_PCH_PPT_DEVICE_ID_TYPE	0x1e00
-#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
-
 void intel_detect_pch(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -416,26 +410,36 @@ void intel_detect_pch(struct drm_device *dev)
 	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
 	if (pch) {
 		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
-			int id;
+			unsigned short id;
 			id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+			dev_priv->pch_id = id;
 
 			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_IBX;
 				dev_priv->num_pch_pll = 2;
 				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+				WARN_ON(!IS_GEN5(dev));
 			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_CPT;
 				dev_priv->num_pch_pll = 2;
 				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
 			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
 				/* PantherPoint is CPT compatible */
 				dev_priv->pch_type = PCH_CPT;
 				dev_priv->num_pch_pll = 2;
 				DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
 			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_LPT;
 				dev_priv->num_pch_pll = 0;
 				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+				WARN_ON(!IS_HASWELL(dev));
+			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+				dev_priv->pch_type = PCH_LPT;
+				dev_priv->num_pch_pll = 0;
+				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+				WARN_ON(!IS_HASWELL(dev));
 			}
 			BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
 		}
@@ -477,6 +481,8 @@ static int i915_drm_freeze(struct drm_device *dev)
 			return error;
 		}
 
+		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+
 		intel_modeset_disable(dev);
 
 		drm_irq_uninstall(dev);
@@ -526,24 +532,29 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
 	return 0;
 }
 
-static int i915_drm_thaw(struct drm_device *dev)
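+/*
+ * Un-suspend the fbdev console from a workqueue so that resume does not
+ * have to wait on a potentially contended console lock.
+ */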
+void intel_console_resume(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private,
+			     console_resume_work);
+	struct drm_device *dev = dev_priv->dev;
+
+	console_lock();
+	intel_fbdev_set_suspend(dev, 0);
+	console_unlock();
+}
+
+static int __i915_drm_thaw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int error = 0;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		mutex_lock(&dev->struct_mutex);
-		i915_gem_restore_gtt_mappings(dev);
-		mutex_unlock(&dev->struct_mutex);
-	}
-
 	i915_restore_state(dev);
 	intel_opregion_setup(dev);
 
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-			ironlake_init_pch_refclk(dev);
+		intel_init_pch_refclk(dev);
 
 		mutex_lock(&dev->struct_mutex);
 		dev_priv->mm.suspended = 0;
@@ -552,8 +563,7 @@ static int i915_drm_thaw(struct drm_device *dev)
 		mutex_unlock(&dev->struct_mutex);
 
 		intel_modeset_init_hw(dev);
-		intel_modeset_setup_hw_state(dev);
-		drm_mode_config_reset(dev);
+		intel_modeset_setup_hw_state(dev, false);
 		drm_irq_install(dev);
 	}
 
@@ -561,14 +571,41 @@ static int i915_drm_thaw(struct drm_device *dev)
 
 	dev_priv->modeset_on_lid = 0;
 
-	console_lock();
-	intel_fbdev_set_suspend(dev, 0);
-	console_unlock();
+	/*
+	 * The console lock can be pretty contended on resume due
+	 * to all the printk activity.  Try to keep it out of the hot
+	 * path of resume if possible.
+	 */
+	if (console_trylock()) {
+		intel_fbdev_set_suspend(dev, 0);
+		console_unlock();
+	} else {
+		schedule_work(&dev_priv->console_resume_work);
+	}
+
+	return error;
+}
+
+static int i915_drm_thaw(struct drm_device *dev)
+{
+	int error = 0;
+
+	intel_gt_reset(dev);
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		mutex_lock(&dev->struct_mutex);
+		i915_gem_restore_gtt_mappings(dev);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	__i915_drm_thaw(dev);
+
 	return error;
 }
 
 int i915_resume(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -579,7 +616,20 @@ int i915_resume(struct drm_device *dev)
 
 	pci_set_master(dev->pdev);
 
-	ret = i915_drm_thaw(dev);
+	intel_gt_reset(dev);
+
+	/*
+	 * Platforms with an opregion should have a sane BIOS; older ones (gen3
+	 * and earlier) need this since the BIOS might clear all our scratch PTEs.
+	 */
+	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+	    !dev_priv->opregion.header) {
+		mutex_lock(&dev->struct_mutex);
+		i915_gem_restore_gtt_mappings(dev);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	ret = __i915_drm_thaw(dev);
 	if (ret)
 		return ret;
 
@@ -833,7 +883,7 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct intel_device_info *intel_info =
 		(struct intel_device_info *) ent->driver_data;
 
-	if (intel_info->is_haswell || intel_info->is_valleyview)
+	if (intel_info->is_valleyview)
 		if(!i915_preliminary_hw_support) {
 			DRM_ERROR("Preliminary hardware support disabled\n");
 			return -ENODEV;
@@ -1140,12 +1190,40 @@ static bool IS_DISPLAYREG(u32 reg)
 	if (reg == GEN6_GDRST)
 		return false;
 
+	switch (reg) {
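+	/* GT registers that must not be remapped into the VLV display range */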
+	case _3D_CHICKEN3:
+	case IVB_CHICKEN3:
+	case GEN7_COMMON_SLICE_CHICKEN1:
+	case GEN7_L3CNTLREG1:
+	case GEN7_L3_CHICKEN_MODE_REGISTER:
+	case GEN7_ROW_CHICKEN2:
+	case GEN7_L3SQCREG4:
+	case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
+	case GEN7_HALF_SLICE_CHICKEN1:
+	case GEN6_MBCTL:
+	case GEN6_UCGCTL2:
+		return false;
+	default:
+		break;
+	}
+
 	return true;
 }
 
+static void
+ilk_dummy_write(struct drm_i915_private *dev_priv)
+{
+	/* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
+	 * chip from rc6 before touching it for real. MI_MODE is masked, hence
+	 * harmless to write 0 into. */
+	I915_WRITE_NOTRACE(MI_MODE, 0);
+}
+
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
 	u##x val = 0; \
+	if (IS_GEN5(dev_priv->dev)) \
+		ilk_dummy_write(dev_priv); \
 	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
 		unsigned long irqflags; \
 		spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
@@ -1177,6 +1255,12 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
 	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
 		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
 	} \
+	if (IS_GEN5(dev_priv->dev)) \
+		ilk_dummy_write(dev_priv); \
+	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
+		DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
+		I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
+	} \
 	if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
 		write##y(val, dev_priv->regs + reg + 0x180000);		\
 	} else {							\
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f511fa2f4168..557843dd4b2e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -58,6 +58,14 @@ enum pipe {
 };
 #define pipe_name(p) ((p) + 'A')
 
+enum transcoder {
+	TRANSCODER_A = 0,
+	TRANSCODER_B,
+	TRANSCODER_C,
+	TRANSCODER_EDP = 0xF,
+};
+#define transcoder_name(t) ((t) + 'A')
+
 enum plane {
 	PLANE_A = 0,
 	PLANE_B,
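
The transcoder enum above decouples CPU transcoders from pipes: the eDP
transcoder sits at 0xF and is not tied to a pipe index, which is why
later hunks read PIPECONF() and the timing registers through
intel_pipe_to_cpu_transcoder() rather than using the raw pipe. As a
sketch of what such a mapping looks like (illustrative only; the real
helper consults crtc state, and the uses_edp flag here is a stand-in):

#include <stdio.h>

enum pipe { PIPE_A = 0, PIPE_B, PIPE_C };
enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP = 0xF,
};

/* Identity mapping except for the eDP special case. */
static enum transcoder pipe_to_transcoder(enum pipe p, int uses_edp)
{
	return uses_edp ? TRANSCODER_EDP : (enum transcoder)p;
}

int main(void)
{
	printf("pipe B -> transcoder %c\n",
	       'A' + pipe_to_transcoder(PIPE_B, 0));
	return 0;
}
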
@@ -93,6 +101,12 @@ struct intel_pch_pll {
 };
 #define I915_NUM_PLLS 2
 
+struct intel_ddi_plls {
+	int spll_refcount;
+	int wrpll1_refcount;
+	int wrpll2_refcount;
+};
+
 /* Interface history:
  *
  * 1.1: Original.
@@ -123,14 +137,6 @@ struct drm_i915_gem_phys_object {
 	struct drm_i915_gem_object *cur_obj;
 };
 
-struct mem_block {
-	struct mem_block *next;
-	struct mem_block *prev;
-	int start;
-	int size;
-	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
-};
-
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
@@ -191,6 +197,7 @@ struct drm_i915_error_state {
 	u32 instdone[I915_NUM_RINGS];
 	u32 acthd[I915_NUM_RINGS];
 	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
 	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
 	/* our own tracking of ring head and tail */
 	u32 cpu_ring_head[I915_NUM_RINGS];
@@ -251,6 +258,7 @@ struct drm_i915_display_funcs {
 				 uint32_t sprite_width, int pixel_size);
 	void (*update_linetime_wm)(struct drm_device *dev, int pipe,
 				 struct drm_display_mode *mode);
+	void (*modeset_global_resources)(struct drm_device *dev);
 	int (*crtc_mode_set)(struct drm_crtc *crtc,
 			     struct drm_display_mode *mode,
 			     struct drm_display_mode *adjusted_mode,
@@ -263,7 +271,6 @@ struct drm_i915_display_funcs {
 			  struct drm_crtc *crtc);
 	void (*fdi_link_train)(struct drm_crtc *crtc);
 	void (*init_clock_gating)(struct drm_device *dev);
-	void (*init_pch_clock_gating)(struct drm_device *dev);
 	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
 			  struct drm_framebuffer *fb,
 			  struct drm_i915_gem_object *obj);
@@ -338,6 +345,7 @@ struct intel_device_info {
 #define I915_PPGTT_PD_ENTRIES 512
 #define I915_PPGTT_PT_ENTRIES 1024
 struct i915_hw_ppgtt {
+	struct drm_device *dev;
 	unsigned num_pd_entries;
 	struct page **pt_pages;
 	uint32_t pd_offset;
@@ -374,6 +382,11 @@ enum intel_pch {
 	PCH_LPT,	/* Lynxpoint PCH */
 };
 
+enum intel_sbi_destination {
+	SBI_ICLK,
+	SBI_MPHY,
+};
+
 #define QUIRK_PIPEA_FORCE (1<<0)
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
@@ -383,154 +396,18 @@ struct intel_fbc_work;
 
 struct intel_gmbus {
 	struct i2c_adapter adapter;
-	bool force_bit;
+	u32 force_bit;
 	u32 reg0;
 	u32 gpio_reg;
 	struct i2c_algo_bit_data bit_algo;
 	struct drm_i915_private *dev_priv;
 };
 
-typedef struct drm_i915_private {
-	struct drm_device *dev;
-
-	const struct intel_device_info *info;
-
-	int relative_constants_mode;
-
-	void __iomem *regs;
-
-	struct drm_i915_gt_funcs gt;
-	/** gt_fifo_count and the subsequent register write are synchronized
-	 * with dev->struct_mutex. */
-	unsigned gt_fifo_count;
-	/** forcewake_count is protected by gt_lock */
-	unsigned forcewake_count;
-	/** gt_lock is also taken in irq contexts. */
-	struct spinlock gt_lock;
-
-	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
-
-	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
-	 * controller on different i2c buses. */
-	struct mutex gmbus_mutex;
-
-	/**
-	 * Base address of the gmbus and gpio block.
-	 */
-	uint32_t gpio_mmio_base;
-
-	struct pci_dev *bridge_dev;
-	struct intel_ring_buffer ring[I915_NUM_RINGS];
-	uint32_t next_seqno;
-
-	drm_dma_handle_t *status_page_dmah;
-	uint32_t counter;
-	struct drm_i915_gem_object *pwrctx;
-	struct drm_i915_gem_object *renderctx;
-
-	struct resource mch_res;
-
-	atomic_t irq_received;
-
-	/* protects the irq masks */
-	spinlock_t irq_lock;
-
-	/* DPIO indirect register protection */
-	spinlock_t dpio_lock;
-
-	/** Cached value of IMR to avoid reads in updating the bitfield */
-	u32 pipestat[2];
-	u32 irq_mask;
-	u32 gt_irq_mask;
-	u32 pch_irq_mask;
-
-	u32 hotplug_supported_mask;
-	struct work_struct hotplug_work;
-
-	int num_pipe;
-	int num_pch_pll;
-
-	/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-	struct timer_list hangcheck_timer;
-	int hangcheck_count;
-	uint32_t last_acthd[I915_NUM_RINGS];
-	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
-
-	unsigned int stop_rings;
-
-	unsigned long cfb_size;
-	unsigned int cfb_fb;
-	enum plane cfb_plane;
-	int cfb_y;
-	struct intel_fbc_work *fbc_work;
-
-	struct intel_opregion opregion;
-
-	/* overlay */
-	struct intel_overlay *overlay;
-	bool sprite_scaling_enabled;
-
-	/* LVDS info */
-	int backlight_level;  /* restore backlight to this value */
-	bool backlight_enabled;
-	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
-	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
-
-	/* Feature bits from the VBIOS */
-	unsigned int int_tv_support:1;
-	unsigned int lvds_dither:1;
-	unsigned int lvds_vbt:1;
-	unsigned int int_crt_support:1;
-	unsigned int lvds_use_ssc:1;
-	unsigned int display_clock_mode:1;
-	int lvds_ssc_freq;
-	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
-	unsigned int lvds_val; /* used for checking LVDS channel mode */
-	struct {
-		int rate;
-		int lanes;
-		int preemphasis;
-		int vswing;
-
-		bool initialized;
-		bool support;
-		int bpp;
-		struct edp_power_seq pps;
-	} edp;
-	bool no_aux_handshake;
-
-	struct notifier_block lid_notifier;
-
-	int crt_ddc_pin;
-	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
-	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
-	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
-
-	unsigned int fsb_freq, mem_freq, is_ddr3;
-
-	spinlock_t error_lock;
-	/* Protected by dev->error_lock. */
-	struct drm_i915_error_state *first_error;
-	struct work_struct error_work;
-	struct completion error_completion;
-	struct workqueue_struct *wq;
-
-	/* Display functions */
-	struct drm_i915_display_funcs display;
-
-	/* PCH chipset type */
-	enum intel_pch pch_type;
-
-	unsigned long quirks;
-
-	/* Register state */
-	bool modeset_on_lid;
+struct i915_suspend_saved_registers {
 	u8 saveLBB;
 	u32 saveDSPACNTR;
 	u32 saveDSPBCNTR;
 	u32 saveDSPARB;
-	u32 saveHWS;
 	u32 savePIPEACONF;
 	u32 savePIPEBCONF;
 	u32 savePIPEASRC;
@@ -676,10 +553,206 @@ typedef struct drm_i915_private {
 	u32 savePIPEB_LINK_N1;
 	u32 saveMCHBAR_RENDER_STANDBY;
 	u32 savePCH_PORT_HOTPLUG;
+};
+
+struct intel_gen6_power_mgmt {
+	struct work_struct work;
+	u32 pm_iir;
+	/* lock - irqsave spinlock that protects the work_struct and
+	 * pm_iir. */
+	spinlock_t lock;
+
+	/* The below variables and all the rps hw state are protected by
+	 * dev->struct_mutex. */
+	u8 cur_delay;
+	u8 min_delay;
+	u8 max_delay;
+
+	struct delayed_work delayed_resume_work;
+
+	/*
+	 * Protects RPS/RC6 register access and PCU communication.
+	 * Must be taken after struct_mutex if nested.
+	 */
+	struct mutex hw_lock;
+};
+
+struct intel_ilk_power_mgmt {
+	u8 cur_delay;
+	u8 min_delay;
+	u8 max_delay;
+	u8 fmax;
+	u8 fstart;
+
+	u64 last_count1;
+	unsigned long last_time1;
+	unsigned long chipset_power;
+	u64 last_count2;
+	struct timespec last_time2;
+	unsigned long gfx_power;
+	u8 corr;
+
+	int c_m;
+	int r_t;
+
+	struct drm_i915_gem_object *pwrctx;
+	struct drm_i915_gem_object *renderctx;
+};
+
+struct i915_dri1_state {
+	unsigned allow_batchbuffer : 1;
+	u32 __iomem *gfx_hws_cpu_addr;
+
+	unsigned int cpp;
+	int back_offset;
+	int front_offset;
+	int current_page;
+	int page_flipping;
+
+	uint32_t counter;
+};
+
+struct intel_l3_parity {
+	u32 *remap_info;
+	struct work_struct error_work;
+};
+
+typedef struct drm_i915_private {
+	struct drm_device *dev;
+
+	const struct intel_device_info *info;
+
+	int relative_constants_mode;
+
+	void __iomem *regs;
+
+	struct drm_i915_gt_funcs gt;
+	/** gt_fifo_count and the subsequent register write are synchronized
+	 * with dev->struct_mutex. */
+	unsigned gt_fifo_count;
+	/** forcewake_count is protected by gt_lock */
+	unsigned forcewake_count;
+	/** gt_lock is also taken in irq contexts. */
+	struct spinlock gt_lock;
+
+	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
+
+	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
+	 * controller on different i2c buses. */
+	struct mutex gmbus_mutex;
+
+	/**
+	 * Base address of the gmbus and gpio block.
+	 */
+	uint32_t gpio_mmio_base;
+
+	struct pci_dev *bridge_dev;
+	struct intel_ring_buffer ring[I915_NUM_RINGS];
+	uint32_t next_seqno;
+
+	drm_dma_handle_t *status_page_dmah;
+	struct resource mch_res;
+
+	atomic_t irq_received;
+
+	/* protects the irq masks */
+	spinlock_t irq_lock;
+
+	/* DPIO indirect register protection */
+	spinlock_t dpio_lock;
+
+	/** Cached value of IMR to avoid reads in updating the bitfield */
+	u32 pipestat[2];
+	u32 irq_mask;
+	u32 gt_irq_mask;
+	u32 pch_irq_mask;
+
+	u32 hotplug_supported_mask;
+	struct work_struct hotplug_work;
+
+	int num_pipe;
+	int num_pch_pll;
+
+	/* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+	struct timer_list hangcheck_timer;
+	int hangcheck_count;
+	uint32_t last_acthd[I915_NUM_RINGS];
+	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
+
+	unsigned int stop_rings;
+
+	unsigned long cfb_size;
+	unsigned int cfb_fb;
+	enum plane cfb_plane;
+	int cfb_y;
+	struct intel_fbc_work *fbc_work;
+
+	struct intel_opregion opregion;
+
+	/* overlay */
+	struct intel_overlay *overlay;
+	bool sprite_scaling_enabled;
+
+	/* LVDS info */
+	int backlight_level;  /* restore backlight to this value */
+	bool backlight_enabled;
+	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+
+	/* Feature bits from the VBIOS */
+	unsigned int int_tv_support:1;
+	unsigned int lvds_dither:1;
+	unsigned int lvds_vbt:1;
+	unsigned int int_crt_support:1;
+	unsigned int lvds_use_ssc:1;
+	unsigned int display_clock_mode:1;
+	int lvds_ssc_freq;
+	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+	unsigned int lvds_val; /* used for checking LVDS channel mode */
+	struct {
+		int rate;
+		int lanes;
+		int preemphasis;
+		int vswing;
+
+		bool initialized;
+		bool support;
+		int bpp;
+		struct edp_power_seq pps;
+	} edp;
+	bool no_aux_handshake;
+
+	int crt_ddc_pin;
+	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
+	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+
+	unsigned int fsb_freq, mem_freq, is_ddr3;
+
+	spinlock_t error_lock;
+	/* Protected by dev->error_lock. */
+	struct drm_i915_error_state *first_error;
+	struct work_struct error_work;
+	struct completion error_completion;
+	struct workqueue_struct *wq;
+
+	/* Display functions */
+	struct drm_i915_display_funcs display;
+
+	/* PCH chipset type */
+	enum intel_pch pch_type;
+	unsigned short pch_id;
+
+	unsigned long quirks;
+
+	/* Register state */
+	bool modeset_on_lid;
 
 	struct {
 		/** Bridge to intel-gtt-ko */
-		const struct intel_gtt *gtt;
+		struct intel_gtt *gtt;
 		/** Memory allocator for GTT stolen memory */
 		struct drm_mm stolen;
 		/** Memory allocator for GTT */
@@ -706,8 +779,6 @@ typedef struct drm_i915_private {
 		/** PPGTT used for aliasing the PPGTT with the GTT */
 		struct i915_hw_ppgtt *aliasing_ppgtt;
 
-		u32 *l3_remap_info;
-
 		struct shrinker inactive_shrinker;
 
 		/**
@@ -785,19 +856,6 @@ typedef struct drm_i915_private {
 		u32 object_count;
 	} mm;
 
-	/* Old dri1 support infrastructure, beware the dragons ya fools entering
-	 * here! */
-	struct {
-		unsigned allow_batchbuffer : 1;
-		u32 __iomem *gfx_hws_cpu_addr;
-
-		unsigned int cpp;
-		int back_offset;
-		int front_offset;
-		int current_page;
-		int page_flipping;
-	} dri1;
-
 	/* Kernel Modesetting */
 
 	struct sdvo_device_mapping sdvo_mappings[2];
@@ -811,6 +869,7 @@ typedef struct drm_i915_private {
 	wait_queue_head_t pending_flip_queue;
 
 	struct intel_pch_pll pch_plls[I915_NUM_PLLS];
+	struct intel_ddi_plls ddi_plls;
 
 	/* Reclocking support */
 	bool render_reclock_avail;
@@ -820,46 +879,17 @@ typedef struct drm_i915_private {
 	u16 orig_clock;
 	int child_dev_num;
 	struct child_device_config *child_dev;
-	struct drm_connector *int_lvds_connector;
-	struct drm_connector *int_edp_connector;
 
 	bool mchbar_need_disable;
 
-	/* gen6+ rps state */
-	struct {
-		struct work_struct work;
-		u32 pm_iir;
-		/* lock - irqsave spinlock that protectects the work_struct and
-		 * pm_iir. */
-		spinlock_t lock;
+	struct intel_l3_parity l3_parity;
 
-		/* The below variables an all the rps hw state are protected by
-		 * dev->struct mutext. */
-		u8 cur_delay;
-		u8 min_delay;
-		u8 max_delay;
-	} rps;
+	/* gen6+ rps state */
+	struct intel_gen6_power_mgmt rps;
 
 	/* ilk-only ips/rps state. Everything in here is protected by the global
 	 * mchdev_lock in intel_pm.c */
-	struct {
-		u8 cur_delay;
-		u8 min_delay;
-		u8 max_delay;
-		u8 fmax;
-		u8 fstart;
-
-		u64 last_count1;
-		unsigned long last_time1;
-		unsigned long chipset_power;
-		u64 last_count2;
-		struct timespec last_time2;
-		unsigned long gfx_power;
-		u8 corr;
-
-		int c_m;
-		int r_t;
-	} ips;
+	struct intel_ilk_power_mgmt ips;
 
 	enum no_fbc_reason no_fbc_reason;
 
@@ -871,14 +901,27 @@ typedef struct drm_i915_private {
 	/* list of fbdev register on this device */
 	struct intel_fbdev *fbdev;
 
+	/*
+	 * The console may be contended at resume, but we don't
+	 * want resume to block on it.
+	 */
+	struct work_struct console_resume_work;
+
 	struct backlight_device *backlight;
 
 	struct drm_property *broadcast_rgb_property;
 	struct drm_property *force_audio_property;
 
-	struct work_struct parity_error_work;
 	bool hw_contexts_disabled;
 	uint32_t hw_context_size;
+
+	bool fdi_rx_polarity_reversed;
+
+	struct i915_suspend_saved_registers regfile;
+
+	/* Old dri1 support infrastructure, beware the dragons ya fools entering
+	 * here! */
+	struct i915_dri1_state dri1;
 } drm_i915_private_t;
 
 /* Iterate over initialised rings */
@@ -1120,9 +1163,14 @@ struct drm_i915_file_private {
 #define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
 #define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
 #define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
+#define IS_IVB_GT1(dev)		((dev)->pci_device == 0x0156 || \
+				 (dev)->pci_device == 0x0152 ||	\
+				 (dev)->pci_device == 0x015a)
 #define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
 #define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
+#define IS_ULT(dev)		(IS_HASWELL(dev) && \
+				 ((dev)->pci_device & 0xFF00) == 0x0A00)
 
 /*
  * The genX designation typically refers to the render engine, so render
@@ -1168,6 +1216,13 @@ struct drm_i915_file_private {
 
 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
 
+#define INTEL_PCH_DEVICE_ID_MASK		0xff00
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
+
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
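
The INTEL_PCH_*_DEVICE_ID_TYPE constants above work because the PCH
variant is encoded in the upper byte of the PCI device ID, hence the
0xff00 mask. A standalone illustration of the decode (the sample device
ID below is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define INTEL_PCH_DEVICE_ID_MASK		0xff00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00

int main(void)
{
	uint16_t id = 0x9c43;	/* hypothetical PCH PCI device id */

	if ((id & INTEL_PCH_DEVICE_ID_MASK) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
		printf("0x%04x decodes as a LynxPoint-LP PCH\n", id);
	return 0;
}
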
@@ -1250,6 +1305,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 
+extern void intel_console_resume(struct work_struct *work);
 
 /* i915_irq.c */
 void i915_hangcheck_elapsed(unsigned long data);
@@ -1257,6 +1313,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
 extern void intel_gt_init(struct drm_device *dev);
+extern void intel_gt_reset(struct drm_device *dev);
 
 void i915_error_state_free(struct kref *error_ref);
 
@@ -1368,8 +1425,7 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct intel_ring_buffer *to);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-				    struct intel_ring_buffer *ring,
-				    u32 seqno);
+				    struct intel_ring_buffer *ring);
 
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
@@ -1387,7 +1443,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 	return (int32_t)(seq1 - seq2) >= 0;
 }
 
-u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
+extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
 
 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
@@ -1499,6 +1555,14 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
 			      unsigned long start,
 			      unsigned long mappable_end,
 			      unsigned long end);
+int i915_gem_gtt_init(struct drm_device *dev);
+void i915_gem_gtt_fini(struct drm_device *dev);
+static inline void i915_gem_chipset_flush(struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen < 6)
+		intel_gtt_chipset_flush();
+}
+
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
@@ -1595,11 +1659,12 @@ extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void intel_modeset_setup_hw_state(struct drm_device *dev);
+extern void intel_modeset_setup_hw_state(struct drm_device *dev,
+					 bool force_restore);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
-extern void ironlake_init_pch_refclk(struct drm_device *dev);
+extern void intel_init_pch_refclk(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
@@ -1628,6 +1693,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
 int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
+
 #define __i915_read(x, y) \
 	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9b285da4449b..742206e45103 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -845,12 +845,12 @@ out:
 		 * domain anymore. */
 		if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 			i915_gem_clflush_object(obj);
-			intel_gtt_chipset_flush();
+			i915_gem_chipset_flush(dev);
 		}
 	}
 
 	if (needs_clflush_after)
-		intel_gtt_chipset_flush();
+		i915_gem_chipset_flush(dev);
 
 	return ret;
 }
@@ -1345,30 +1345,17 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	trace_i915_gem_object_fault(obj, page_offset, true, write);
 
 	/* Now bind it into the GTT if needed */
-	if (!obj->map_and_fenceable) {
-		ret = i915_gem_object_unbind(obj);
-		if (ret)
-			goto unlock;
-	}
-	if (!obj->gtt_space) {
-		ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
-		if (ret)
-			goto unlock;
-
-		ret = i915_gem_object_set_to_gtt_domain(obj, write);
-		if (ret)
-			goto unlock;
-	}
-
-	if (!obj->has_global_gtt_mapping)
-		i915_gem_gtt_bind_object(obj, obj->cache_level);
-
-	ret = i915_gem_object_get_fence(obj);
+	ret = i915_gem_object_pin(obj, 0, true, false);
 	if (ret)
 		goto unlock;
 
-	if (i915_gem_object_is_inactive(obj))
-		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+	ret = i915_gem_object_set_to_gtt_domain(obj, write);
+	if (ret)
+		goto unpin;
+
+	ret = i915_gem_object_get_fence(obj);
+	if (ret)
+		goto unpin;
 
 	obj->fault_mappable = true;
 
@@ -1377,6 +1364,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	/* Finally, remap it using the new GTT offset */
 	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+unpin:
+	i915_gem_object_unpin(obj);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 out:
@@ -1707,10 +1696,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	if (obj->pages_pin_count)
 		return -EBUSY;
 
+	/* ->put_pages might need to allocate memory for the bit17 swizzle
+	 * array, hence protect them from being reaped by removing them from gtt
+	 * lists early. */
+	list_del(&obj->gtt_list);
+
 	ops->put_pages(obj);
 	obj->pages = NULL;
 
-	list_del(&obj->gtt_list);
 	if (i915_gem_object_is_purgeable(obj))
 		i915_gem_object_truncate(obj);
 
@@ -1868,11 +1861,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 
 void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-			       struct intel_ring_buffer *ring,
-			       u32 seqno)
+			       struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 seqno = intel_ring_get_seqno(ring);
 
 	BUG_ON(ring == NULL);
 	obj->ring = ring;
@@ -1933,26 +1926,54 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 	WARN_ON(i915_verify_lists(dev));
 }
 
-static u32
-i915_gem_get_seqno(struct drm_device *dev)
+static int
+i915_gem_handle_seqno_wrap(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 seqno = dev_priv->next_seqno;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int ret, i, j;
 
-	/* reserve 0 for non-seqno */
-	if (++dev_priv->next_seqno == 0)
-		dev_priv->next_seqno = 1;
+	/* The hardware uses various monotonic 32-bit counters; if we
+	 * detect that they will wrap around, we need to idle the GPU
+	 * and reset those counters.
+	 */
+	ret = 0;
+	for_each_ring(ring, dev_priv, i) {
+		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+			ret |= ring->sync_seqno[j] != 0;
+	}
+	if (ret == 0)
+		return ret;
 
-	return seqno;
+	ret = i915_gpu_idle(dev);
+	if (ret)
+		return ret;
+
+	i915_gem_retire_requests(dev);
+	for_each_ring(ring, dev_priv, i) {
+		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+			ring->sync_seqno[j] = 0;
+	}
+
+	return 0;
 }
 
-u32
-i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
+int
+i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 {
-	if (ring->outstanding_lazy_request == 0)
-		ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	return ring->outstanding_lazy_request;
+	/* reserve 0 for non-seqno */
+	if (dev_priv->next_seqno == 0) {
+		int ret = i915_gem_handle_seqno_wrap(dev);
+		if (ret)
+			return ret;
+
+		dev_priv->next_seqno = 1;
+	}
+
+	*seqno = dev_priv->next_seqno++;
+	return 0;
 }
 
 int
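
The wrap handling above leans on the same modular-arithmetic trick that
i915_seqno_passed() already uses: an unsigned difference reinterpreted
as signed orders two seqnos correctly as long as they are less than
2^31 apart. A self-contained demonstration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors i915_seqno_passed(): true if seq1 is at or after seq2. */
static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(10, 5));			/* ordinary ordering */
	assert(seqno_passed(0x00000002, 0xfffffffe));	/* across the wrap */
	assert(!seqno_passed(0xfffffffe, 0x00000002));
	printf("wrap-safe comparisons hold\n");
	return 0;
}

The hardware semaphore registers have no such luxury, which is why
i915_gem_handle_seqno_wrap() idles the GPU and zeroes every ring's
sync_seqno[] before next_seqno is allowed to restart at 1.
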
@@ -1963,7 +1984,6 @@ i915_add_request(struct intel_ring_buffer *ring,
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_request *request;
 	u32 request_ring_position;
-	u32 seqno;
 	int was_empty;
 	int ret;
 
@@ -1982,7 +2002,6 @@ i915_add_request(struct intel_ring_buffer *ring,
 	if (request == NULL)
 		return -ENOMEM;
 
-	seqno = i915_gem_next_request_seqno(ring);
 
 	/* Record the position of the start of the request so that
 	 * should we detect the updated seqno part-way through the
@@ -1991,15 +2010,13 @@ i915_add_request(struct intel_ring_buffer *ring,
 	 */
 	request_ring_position = intel_ring_get_tail(ring);
 
-	ret = ring->add_request(ring, &seqno);
+	ret = ring->add_request(ring);
 	if (ret) {
 		kfree(request);
 		return ret;
 	}
 
-	trace_i915_gem_request_add(ring, seqno);
-
-	request->seqno = seqno;
+	request->seqno = intel_ring_get_seqno(ring);
 	request->ring = ring;
 	request->tail = request_ring_position;
 	request->emitted_jiffies = jiffies;
@@ -2017,23 +2034,24 @@ i915_add_request(struct intel_ring_buffer *ring,
 		spin_unlock(&file_priv->mm.lock);
 	}
 
+	trace_i915_gem_request_add(ring, request->seqno);
 	ring->outstanding_lazy_request = 0;
 
 	if (!dev_priv->mm.suspended) {
 		if (i915_enable_hangcheck) {
 			mod_timer(&dev_priv->hangcheck_timer,
-				  jiffies +
-				  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+				  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
 		}
 		if (was_empty) {
 			queue_delayed_work(dev_priv->wq,
-					   &dev_priv->mm.retire_work, HZ);
+					   &dev_priv->mm.retire_work,
+					   round_jiffies_up_relative(HZ));
 			intel_mark_busy(dev_priv->dev);
 		}
 	}
 
 	if (out_seqno)
-		*out_seqno = seqno;
+		*out_seqno = request->seqno;
 	return 0;
 }
 
@@ -2131,7 +2149,6 @@ void
 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 {
 	uint32_t seqno;
-	int i;
 
 	if (list_empty(&ring->request_list))
 		return;
@@ -2140,10 +2157,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 
 	seqno = ring->get_seqno(ring, true);
 
-	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
-		if (seqno >= ring->sync_seqno[i])
-			ring->sync_seqno[i] = 0;
-
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -2218,7 +2231,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
 
 	/* Come back later if the device is busy... */
 	if (!mutex_trylock(&dev->struct_mutex)) {
-		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+				   round_jiffies_up_relative(HZ));
 		return;
 	}
 
@@ -2236,7 +2250,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	}
 
 	if (!dev_priv->mm.suspended && !idle)
-		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+				   round_jiffies_up_relative(HZ));
 	if (idle)
 		intel_mark_idle(dev);
 
@@ -2386,7 +2401,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 
 	ret = to->sync_to(to, from, seqno);
 	if (!ret)
-		from->sync_seqno[idx] = seqno;
+		/* We use last_read_seqno because sync_to()
+		 * might have just caused seqno wrap under
+		 * the radar.
+		 */
+		from->sync_seqno[idx] = obj->last_read_seqno;
 
 	return ret;
 }
@@ -2469,14 +2488,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
-static int i915_ring_idle(struct intel_ring_buffer *ring)
-{
-	if (list_empty(&ring->active_list))
-		return 0;
-
-	return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
-}
-
 int i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2489,7 +2500,7 @@ int i915_gpu_idle(struct drm_device *dev)
 		if (ret)
 			return ret;
 
-		ret = i915_ring_idle(ring);
+		ret = intel_ring_idle(ring);
 		if (ret)
 			return ret;
 	}
@@ -2923,13 +2934,14 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	i915_gem_object_pin_pages(obj);
+
  search_free:
 	if (map_and_fenceable)
-		free_space =
-			drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
-							  size, alignment, obj->cache_level,
-							  0, dev_priv->mm.gtt_mappable_end,
-							  false);
+		free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
+							       size, alignment, obj->cache_level,
+							       0, dev_priv->mm.gtt_mappable_end,
+							       false);
 	else
 		free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
 						      size, alignment, obj->cache_level,
@@ -2937,60 +2949,60 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
 	if (free_space != NULL) {
 		if (map_and_fenceable)
-			obj->gtt_space =
+			free_space =
 				drm_mm_get_block_range_generic(free_space,
 							       size, alignment, obj->cache_level,
 							       0, dev_priv->mm.gtt_mappable_end,
 							       false);
 		else
-			obj->gtt_space =
+			free_space =
 				drm_mm_get_block_generic(free_space,
 							 size, alignment, obj->cache_level,
 							 false);
 	}
-	if (obj->gtt_space == NULL) {
+	if (free_space == NULL) {
 		ret = i915_gem_evict_something(dev, size, alignment,
 					       obj->cache_level,
 					       map_and_fenceable,
 					       nonblocking);
-		if (ret)
+		if (ret) {
+			i915_gem_object_unpin_pages(obj);
 			return ret;
+		}
 
 		goto search_free;
 	}
 	if (WARN_ON(!i915_gem_valid_gtt_space(dev,
-					      obj->gtt_space,
+					      free_space,
 					      obj->cache_level))) {
-		drm_mm_put_block(obj->gtt_space);
-		obj->gtt_space = NULL;
+		i915_gem_object_unpin_pages(obj);
+		drm_mm_put_block(free_space);
 		return -EINVAL;
 	}
 
-
 	ret = i915_gem_gtt_prepare_object(obj);
 	if (ret) {
-		drm_mm_put_block(obj->gtt_space);
-		obj->gtt_space = NULL;
+		i915_gem_object_unpin_pages(obj);
+		drm_mm_put_block(free_space);
 		return ret;
 	}
 
-	if (!dev_priv->mm.aliasing_ppgtt)
-		i915_gem_gtt_bind_object(obj, obj->cache_level);
-
 	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-	obj->gtt_offset = obj->gtt_space->start;
+	obj->gtt_space = free_space;
+	obj->gtt_offset = free_space->start;
 
 	fenceable =
-		obj->gtt_space->size == fence_size &&
-		(obj->gtt_space->start & (fence_alignment - 1)) == 0;
+		free_space->size == fence_size &&
+		(free_space->start & (fence_alignment - 1)) == 0;
 
 	mappable =
 		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
 
 	obj->map_and_fenceable = mappable && fenceable;
 
+	i915_gem_object_unpin_pages(obj);
 	trace_i915_gem_object_bind(obj, map_and_fenceable);
 	i915_gem_verify_gtt(dev);
 	return 0;
@@ -3059,7 +3071,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 		return;
 
 	i915_gem_clflush_object(obj);
-	intel_gtt_chipset_flush();
+	i915_gem_chipset_flush(obj->base.dev);
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
 
@@ -3454,11 +3466,16 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	}
 
 	if (obj->gtt_space == NULL) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
 		ret = i915_gem_object_bind_to_gtt(obj, alignment,
 						  map_and_fenceable,
 						  nonblocking);
 		if (ret)
 			return ret;
+
+		if (!dev_priv->mm.aliasing_ppgtt)
+			i915_gem_gtt_bind_object(obj, obj->cache_level);
 	}
 
 	if (!obj->has_global_gtt_mapping && map_and_fenceable)
@@ -3832,7 +3849,7 @@ void i915_gem_l3_remap(struct drm_device *dev)
 	if (!IS_IVYBRIDGE(dev))
 		return;
 
-	if (!dev_priv->mm.l3_remap_info)
+	if (!dev_priv->l3_parity.remap_info)
 		return;
 
 	misccpctl = I915_READ(GEN7_MISCCPCTL);
@@ -3841,12 +3858,12 @@ void i915_gem_l3_remap(struct drm_device *dev)
 
 	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
 		u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
-		if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
+		if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
 			DRM_DEBUG("0x%x was already programmed to %x\n",
 				  GEN7_L3LOG_BASE + i, remap);
-		if (remap && !dev_priv->mm.l3_remap_info[i/4])
+		if (remap && !dev_priv->l3_parity.remap_info[i/4])
 			DRM_DEBUG_DRIVER("Clearing remapped register\n");
-		I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
+		I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
 	}
 
 	/* Make sure all the writes land before disabling dop clock gating */
@@ -3876,68 +3893,6 @@ void i915_gem_init_swizzling(struct drm_device *dev)
 		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
 }
 
-void i915_gem_init_ppgtt(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t pd_offset;
-	struct intel_ring_buffer *ring;
-	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-	uint32_t __iomem *pd_addr;
-	uint32_t pd_entry;
-	int i;
-
-	if (!dev_priv->mm.aliasing_ppgtt)
-		return;
-
-
-	pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
-	for (i = 0; i < ppgtt->num_pd_entries; i++) {
-		dma_addr_t pt_addr;
-
-		if (dev_priv->mm.gtt->needs_dmar)
-			pt_addr = ppgtt->pt_dma_addr[i];
-		else
-			pt_addr = page_to_phys(ppgtt->pt_pages[i]);
-
-		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
-		pd_entry |= GEN6_PDE_VALID;
-
-		writel(pd_entry, pd_addr + i);
-	}
-	readl(pd_addr);
-
-	pd_offset = ppgtt->pd_offset;
-	pd_offset /= 64; /* in cachelines, */
-	pd_offset <<= 16;
-
-	if (INTEL_INFO(dev)->gen == 6) {
-		uint32_t ecochk, gab_ctl, ecobits;
-
-		ecobits = I915_READ(GAC_ECO_BITS); 
-		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
-
-		gab_ctl = I915_READ(GAB_CTL);
-		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
-
-		ecochk = I915_READ(GAM_ECOCHK);
-		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
-				       ECOCHK_PPGTT_CACHE64B);
-		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-	} else if (INTEL_INFO(dev)->gen >= 7) {
-		I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
-		/* GFX_MODE is per-ring on gen7+ */
-	}
-
-	for_each_ring(ring, dev_priv, i) {
-		if (INTEL_INFO(dev)->gen >= 7)
-			I915_WRITE(RING_MODE_GEN7(ring),
-				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-
-		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
-	}
-}
-
 static bool
 intel_enable_blt(struct drm_device *dev)
 {
@@ -3960,7 +3915,7 @@ i915_gem_init_hw(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
-	if (!intel_enable_gtt())
+	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
 		return -EIO;
 
 	if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
@@ -4295,7 +4250,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 			page_cache_release(page);
 		}
 	}
-	intel_gtt_chipset_flush();
+	i915_gem_chipset_flush(dev);
 
 	obj->phys_obj->cur_obj = NULL;
 	obj->phys_obj = NULL;
@@ -4382,7 +4337,7 @@ i915_gem_phys_pwrite(struct drm_device *dev,
 			return -EFAULT;
 	}
 
-	intel_gtt_chipset_flush();
+	i915_gem_chipset_flush(dev);
 	return 0;
 }
 
@@ -4407,6 +4362,19 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 	spin_unlock(&file_priv->mm.lock);
 }
 
+static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+{
+	if (!mutex_is_locked(mutex))
+		return false;
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+	return mutex->owner == task;
+#else
+	/* Since UP may be pre-empted, we cannot assume that we own the lock */
+	return false;
+#endif
+}
+
 static int
 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 {
@@ -4417,10 +4385,15 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_i915_gem_object *obj;
 	int nr_to_scan = sc->nr_to_scan;
+	bool unlock = true;
 	int cnt;
 
-	if (!mutex_trylock(&dev->struct_mutex))
-		return 0;
+	if (!mutex_trylock(&dev->struct_mutex)) {
+		if (!mutex_is_locked_by(&dev->struct_mutex, current))
+			return 0;
+
+		unlock = false;
+	}
 
 	if (nr_to_scan) {
 		nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
@@ -4436,6 +4409,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
 
-	mutex_unlock(&dev->struct_mutex);
+	if (unlock)
+		mutex_unlock(&dev->struct_mutex);
 	return cnt;
 }
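
mutex_is_locked_by() above exists because the shrinker can be invoked
from an allocation made while struct_mutex is already held by the
current task, and a plain trylock failure cannot distinguish "someone
else holds it" from "we hold it ourselves". A userspace sketch of the
owner check, with made-up names (the kernel variant relies on the mutex
owner field, which is only tracked under CONFIG_SMP or
CONFIG_DEBUG_MUTEXES):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct owned_mutex {
	pthread_mutex_t lock;
	pthread_t owner;
	bool locked;
};

static void om_lock(struct owned_mutex *m)
{
	pthread_mutex_lock(&m->lock);
	m->owner = pthread_self();
	m->locked = true;
}

static void om_unlock(struct owned_mutex *m)
{
	m->locked = false;
	pthread_mutex_unlock(&m->lock);
}

/* Returns true if the shrinker may proceed; *unlock says whether the
 * caller took the lock itself (trylock path) or is a reentrant call
 * that must leave the lock alone. */
static bool shrinker_enter(struct owned_mutex *m, bool *unlock)
{
	if (pthread_mutex_trylock(&m->lock) == 0) {
		m->owner = pthread_self();
		m->locked = true;
		*unlock = true;
		return true;
	}
	if (m->locked && pthread_equal(m->owner, pthread_self())) {
		*unlock = false;	/* reentrant: don't unlock on exit */
		return true;
	}
	return false;			/* genuinely contended: back off */
}

int main(void)
{
	struct owned_mutex m = { .lock = PTHREAD_MUTEX_INITIALIZER };
	bool unlock;

	om_lock(&m);			/* simulate allocating under the lock */
	if (shrinker_enter(&m, &unlock))
		printf("reentrant entry allowed, unlock=%d\n", unlock);
	om_unlock(&m);
	return 0;
}
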
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 05ed42f203d7..a3f06bcad551 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -146,7 +146,7 @@ create_hw_context(struct drm_device *dev,
 	struct i915_hw_context *ctx;
 	int ret, id;
 
-	ctx = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL);
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (ctx == NULL)
 		return ERR_PTR(-ENOMEM);
 
@@ -410,9 +410,8 @@ static int do_switch(struct i915_hw_context *to)
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from_obj != NULL) {
-		u32 seqno = i915_gem_next_request_seqno(ring);
 		from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_gem_object_move_to_active(from_obj, ring, seqno);
+		i915_gem_object_move_to_active(from_obj, ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3eea143749f6..ee8f97f0539e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -128,15 +128,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 					 target_i915_obj->cache_level);
 	}
 
-	/* The target buffer should have appeared before us in the
-	 * exec_object list, so it should have a GTT space bound by now.
-	 */
-	if (unlikely(target_offset == 0)) {
-		DRM_DEBUG("No GTT space found for object %d\n",
-			  reloc->target_handle);
-		return ret;
-	}
-
 	/* Validate that the target is in a valid r/w GPU domain */
 	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
 		DRM_DEBUG("reloc with multiple write domains: "
@@ -672,7 +663,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 	}
 
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
-		intel_gtt_chipset_flush();
+		i915_gem_chipset_flush(ring->dev);
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
@@ -722,8 +713,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *objects,
-				   struct intel_ring_buffer *ring,
-				   u32 seqno)
+				   struct intel_ring_buffer *ring)
 {
 	struct drm_i915_gem_object *obj;
 
@@ -735,10 +725,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
 		obj->base.write_domain = obj->base.pending_write_domain;
 		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
-		i915_gem_object_move_to_active(obj, ring, seqno);
+		i915_gem_object_move_to_active(obj, ring);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
-			obj->last_write_seqno = seqno;
+			obj->last_write_seqno = intel_ring_get_seqno(ring);
 			if (obj->pin_count) /* check for potential scanout */
 				intel_mark_fb_busy(obj);
 		}
@@ -798,8 +788,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct intel_ring_buffer *ring;
 	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
 	u32 exec_start, exec_len;
-	u32 seqno;
 	u32 mask;
+	u32 flags;
 	int ret, mode, i;
 
 	if (!i915_gem_check_execbuffer(args)) {
@@ -811,6 +801,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
+	flags = 0;
+	if (args->flags & I915_EXEC_SECURE) {
+		if (!file->is_master || !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		flags |= I915_DISPATCH_SECURE;
+	}
+
 	switch (args->flags & I915_EXEC_RING_MASK) {
 	case I915_EXEC_DEFAULT:
 	case I915_EXEC_RENDER:
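
The I915_EXEC_SECURE handling above is a plain privilege gate: the flag
is honoured only for a DRM master holding CAP_SYS_ADMIN, and anything
else fails hard with -EPERM rather than silently dispatching the batch
non-secure. The shape of the check as a standalone sketch, with
stand-in names for the real flags:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define EXEC_SECURE	(1u << 0)	/* stand-in for I915_EXEC_SECURE */
#define DISPATCH_SECURE	(1u << 0)	/* stand-in for I915_DISPATCH_SECURE */

static int exec_flags(unsigned user_flags, bool is_master, bool is_admin,
		      unsigned *dispatch_flags)
{
	*dispatch_flags = 0;
	if (user_flags & EXEC_SECURE) {
		if (!is_master || !is_admin)
			return -EPERM;	/* refuse rather than downgrade */
		*dispatch_flags |= DISPATCH_SECURE;
	}
	return 0;
}

int main(void)
{
	unsigned flags;
	int ret;

	ret = exec_flags(EXEC_SECURE, true, false, &flags);
	printf("unprivileged: %d\n", ret);

	ret = exec_flags(EXEC_SECURE, true, true, &flags);
	printf("privileged:   %d (flags=%u)\n", ret, flags);
	return 0;
}
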
@@ -983,26 +981,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
+	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
+	 * batch" bit. Hence we need to pin secure batches into the global gtt.
+	 * hsw should have this fixed, but let's be paranoid and do it
+	 * unconditionally for now. */
+	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
+		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+
 	ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
 	if (ret)
 		goto err;
 
-	seqno = i915_gem_next_request_seqno(ring);
-	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
-		if (seqno < ring->sync_seqno[i]) {
-			/* The GPU can not handle its semaphore value wrapping,
-			 * so every billion or so execbuffers, we need to stall
-			 * the GPU in order to reset the counters.
-			 */
-			ret = i915_gpu_idle(dev);
-			if (ret)
-				goto err;
-			i915_gem_retire_requests(dev);
-
-			BUG_ON(ring->sync_seqno[i]);
-		}
-	}
-
 	ret = i915_switch_context(ring, file, ctx_id);
 	if (ret)
 		goto err;
@@ -1028,8 +1017,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			goto err;
 	}
 
-	trace_i915_gem_ring_dispatch(ring, seqno);
-
 	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
 	exec_len = args->batch_len;
 	if (cliprects) {
@@ -1040,17 +1027,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 				goto err;
 
 			ret = ring->dispatch_execbuffer(ring,
-							exec_start, exec_len);
+							exec_start, exec_len,
+							flags);
 			if (ret)
 				goto err;
 		}
 	} else {
-		ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
+		ret = ring->dispatch_execbuffer(ring,
+						exec_start, exec_len,
+						flags);
 		if (ret)
 			goto err;
 	}
 
-	i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
+	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+
+	i915_gem_execbuffer_move_to_active(&objects, ring);
 	i915_gem_execbuffer_retire_commands(dev, file, ring);
 
 err:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index df470b5e8d36..2c150dee78a7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,19 +28,67 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+typedef uint32_t gtt_pte_t;
+
+/* PPGTT stuff */
+#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
+
+#define GEN6_PDE_VALID			(1 << 0)
+/* gen6+ has bit 11-4 for physical addr bit 39-32 */
+#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
+
+#define GEN6_PTE_VALID			(1 << 0)
+#define GEN6_PTE_UNCACHED		(1 << 1)
+#define HSW_PTE_UNCACHED		(0)
+#define GEN6_PTE_CACHE_LLC		(2 << 1)
+#define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
+#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
+
+static inline gtt_pte_t pte_encode(struct drm_device *dev,
+				   dma_addr_t addr,
+				   enum i915_cache_level level)
+{
+	gtt_pte_t pte = GEN6_PTE_VALID;
+	pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+	switch (level) {
+	case I915_CACHE_LLC_MLC:
+		/* Haswell doesn't set L3 this way */
+		if (IS_HASWELL(dev))
+			pte |= GEN6_PTE_CACHE_LLC;
+		else
+			pte |= GEN6_PTE_CACHE_LLC_MLC;
+		break;
+	case I915_CACHE_LLC:
+		pte |= GEN6_PTE_CACHE_LLC;
+		break;
+	case I915_CACHE_NONE:
+		if (IS_HASWELL(dev))
+			pte |= HSW_PTE_UNCACHED;
+		else
+			pte |= GEN6_PTE_UNCACHED;
+		break;
+	default:
+		BUG();
+	}
+
+	return pte;
+}
+
 /* PPGTT support for Sandybridge/Gen6 and later */
 static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
 				   unsigned first_entry,
 				   unsigned num_entries)
 {
-	uint32_t *pt_vaddr;
-	uint32_t scratch_pte;
+	gtt_pte_t *pt_vaddr;
+	gtt_pte_t scratch_pte;
 	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
 	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
 	unsigned last_pte, i;
 
-	scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
-	scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
+	scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
+				 I915_CACHE_LLC);
 
 	while (num_entries) {
 		last_pte = first_pte + num_entries;
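
pte_encode() above packs a 40-bit physical address into a 32-bit PTE:
bits 31:12 carry the low address bits, GEN6_GTT_ADDR_ENCODE() folds
address bits 39:32 into PTE bits 11:4, and the cache level supplies
bits 2:1 alongside the valid bit. A worked example with an arbitrary
page address:

#include <stdint.h>
#include <stdio.h>

/* Same macros as the patch; the sample address is made up. */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_VALID			(1 << 0)
#define GEN6_PTE_CACHE_LLC		(2 << 1)

int main(void)
{
	uint64_t addr = 0x1abcd2000ULL;	/* 40-bit, page-aligned sample */
	uint32_t pte = (uint32_t)GEN6_GTT_ADDR_ENCODE(addr) |
		       GEN6_PTE_CACHE_LLC | GEN6_PTE_VALID;

	/* addr[39:32] = 0x01 lands in pte[11:4]: prints 0xabcd2015 */
	printf("pte = 0x%08x\n", pte);
	return 0;
}
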
@@ -77,6 +125,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 	if (!ppgtt)
 		return ret;
 
+	ppgtt->dev = dev;
 	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
 	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
 				  GFP_KERNEL);
@@ -118,7 +167,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 	i915_ppgtt_clear_range(ppgtt, 0,
 			       ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
 
-	ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
+	ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
 
 	dev_priv->mm.aliasing_ppgtt = ppgtt;
 
@@ -168,9 +217,9 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
 static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
 					 const struct sg_table *pages,
 					 unsigned first_entry,
-					 uint32_t pte_flags)
+					 enum i915_cache_level cache_level)
 {
-	uint32_t *pt_vaddr, pte;
+	gtt_pte_t *pt_vaddr;
 	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
 	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
 	unsigned i, j, m, segment_len;
@@ -188,8 +237,8 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
 
 		for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
 			page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
-			pt_vaddr[j] = pte | pte_flags;
+			pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
+						 cache_level);
 
 			/* grab the next page */
 			if (++m == segment_len) {
@@ -213,29 +262,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 			    struct drm_i915_gem_object *obj,
 			    enum i915_cache_level cache_level)
 {
-	uint32_t pte_flags = GEN6_PTE_VALID;
-
-	switch (cache_level) {
-	case I915_CACHE_LLC_MLC:
-		pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
-		break;
-	case I915_CACHE_LLC:
-		pte_flags |= GEN6_PTE_CACHE_LLC;
-		break;
-	case I915_CACHE_NONE:
-		if (IS_HASWELL(obj->base.dev))
-			pte_flags |= HSW_PTE_UNCACHED;
-		else
-			pte_flags |= GEN6_PTE_UNCACHED;
-		break;
-	default:
-		BUG();
-	}
-
 	i915_ppgtt_insert_sg_entries(ppgtt,
 				     obj->pages,
 				     obj->gtt_space->start >> PAGE_SHIFT,
-				     pte_flags);
+				     cache_level);
 }
 
 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -246,23 +276,65 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
 			       obj->base.size >> PAGE_SHIFT);
 }
 
-/* XXX kill agp_type! */
-static unsigned int cache_level_to_agp_type(struct drm_device *dev,
-					    enum i915_cache_level cache_level)
+void i915_gem_init_ppgtt(struct drm_device *dev)
 {
-	switch (cache_level) {
-	case I915_CACHE_LLC_MLC:
-		if (INTEL_INFO(dev)->gen >= 6)
-			return AGP_USER_CACHED_MEMORY_LLC_MLC;
-		/* Older chipsets do not have this extra level of CPU
-		 * cacheing, so fallthrough and request the PTE simply
-		 * as cached.
-		 */
-	case I915_CACHE_LLC:
-		return AGP_USER_CACHED_MEMORY;
-	default:
-	case I915_CACHE_NONE:
-		return AGP_USER_MEMORY;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t pd_offset;
+	struct intel_ring_buffer *ring;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	uint32_t __iomem *pd_addr;
+	uint32_t pd_entry;
+	int i;
+
+	if (!dev_priv->mm.aliasing_ppgtt)
+		return;
+
+
+	pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
+	for (i = 0; i < ppgtt->num_pd_entries; i++) {
+		dma_addr_t pt_addr;
+
+		if (dev_priv->mm.gtt->needs_dmar)
+			pt_addr = ppgtt->pt_dma_addr[i];
+		else
+			pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+
+		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+		pd_entry |= GEN6_PDE_VALID;
+
+		writel(pd_entry, pd_addr + i);
+	}
+	readl(pd_addr);
+
+	pd_offset = ppgtt->pd_offset;
+	pd_offset /= 64; /* in cachelines, */
+	pd_offset <<= 16;
+
+	if (INTEL_INFO(dev)->gen == 6) {
+		uint32_t ecochk, gab_ctl, ecobits;
+
+		ecobits = I915_READ(GAC_ECO_BITS);
+		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+
+		gab_ctl = I915_READ(GAB_CTL);
+		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+		ecochk = I915_READ(GAM_ECOCHK);
+		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
+				       ECOCHK_PPGTT_CACHE64B);
+		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+	} else if (INTEL_INFO(dev)->gen >= 7) {
+		I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
+		/* GFX_MODE is per-ring on gen7+ */
+	}
+
+	for_each_ring(ring, dev_priv, i) {
+		if (INTEL_INFO(dev)->gen >= 7)
+			I915_WRITE(RING_MODE_GEN7(ring),
+				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+
+		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
 	}
 }
 
@@ -288,13 +360,40 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
 		dev_priv->mm.interruptible = interruptible;
 }
 
+
+static void i915_ggtt_clear_range(struct drm_device *dev,
+				 unsigned first_entry,
+				 unsigned num_entries)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	gtt_pte_t scratch_pte;
+	gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
+	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+	int i;
+
+	if (INTEL_INFO(dev)->gen < 6) {
+		intel_gtt_clear_range(first_entry, num_entries);
+		return;
+	}
+
+	if (WARN(num_entries > max_entries,
+		 "First entry = %d; Num entries = %d (max=%d)\n",
+		 first_entry, num_entries, max_entries))
+		num_entries = max_entries;
+
+	scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
+	for (i = 0; i < num_entries; i++)
+		iowrite32(scratch_pte, &gtt_base[i]);
+	readl(gtt_base);
+}
+
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
 	/* First fill our portion of the GTT with scratch pages */
-	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+	i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
 			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
@@ -302,7 +401,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 		i915_gem_gtt_bind_object(obj, obj->cache_level);
 	}
 
-	intel_gtt_chipset_flush();
+	i915_gem_chipset_flush(dev);
 }
 
 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
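
i915_ggtt_clear_range() above points every entry in the range at the
scratch page, so a stray GPU access lands in harmless memory, and it
clamps rather than writes past the end of the PTE array. The clamp
logic as a small self-contained sketch (names are illustrative):

#include <stdint.h>
#include <stdio.h>

static void clear_range(uint32_t *gtt, unsigned total, unsigned first,
			unsigned num, uint32_t scratch_pte)
{
	unsigned max = total - first;
	unsigned i;

	if (num > max) {
		fprintf(stderr,
			"First entry = %u; Num entries = %u (max=%u)\n",
			first, num, max);
		num = max;	/* truncate instead of scribbling past the end */
	}
	for (i = 0; i < num; i++)
		gtt[first + i] = scratch_pte;
}

int main(void)
{
	uint32_t gtt[16] = { 0 };

	clear_range(gtt, 16, 12, 8, 0xdead0001);	/* clamped to 4 writes */
	printf("gtt[15] = 0x%08x\n", gtt[15]);
	return 0;
}
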
@@ -318,21 +417,76 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
+/*
+ * Binds an object into the global gtt with the specified cache level. The object
+ * will be accessible to the GPU via commands whose operands reference offsets
+ * within the global GTT as well as accessible by the CPU through the GMADR
+ * mapped BAR (dev_priv->mm.gtt->gtt).
+ */
+static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
+				  enum i915_cache_level level)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct sg_table *st = obj->pages;
+	struct scatterlist *sg = st->sgl;
+	const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
+	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+	gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
+	int unused, i = 0;
+	unsigned int len, m = 0;
+	dma_addr_t addr;
+
+	for_each_sg(st->sgl, sg, st->nents, unused) {
+		len = sg_dma_len(sg) >> PAGE_SHIFT;
+		for (m = 0; m < len; m++) {
+			addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+			iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
+			i++;
+		}
+	}
+
+	BUG_ON(i > max_entries);
+	BUG_ON(i != obj->base.size / PAGE_SIZE);
+
+	/* XXX: This serves as a posting read to make sure that the PTE has
+	 * actually been updated. There is some concern that even though
+	 * registers and PTEs are within the same BAR, they may be subject
+	 * to NUMA access patterns. Therefore, even with the way we assume
+	 * hardware should work, we must keep this posting read for paranoia.
+	 */
+	if (i != 0)
+		WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
+
+	/* This next bit makes the above posting read even more important. We
+	 * want to flush the TLBs only after we're certain all the PTE updates
+	 * have finished.
+	 */
+	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+	POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 			      enum i915_cache_level cache_level)
 {
 	struct drm_device *dev = obj->base.dev;
-	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
+	if (INTEL_INFO(dev)->gen < 6) {
+		unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+			AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+		intel_gtt_insert_sg_entries(obj->pages,
+					    obj->gtt_space->start >> PAGE_SHIFT,
+					    flags);
+	} else {
+		gen6_ggtt_bind_object(obj, cache_level);
+	}
 
-	intel_gtt_insert_sg_entries(obj->pages,
-				    obj->gtt_space->start >> PAGE_SHIFT,
-				    agp_type);
 	obj->has_global_gtt_mapping = 1;
 }
 
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
-	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+	i915_ggtt_clear_range(obj->base.dev,
+			      obj->gtt_space->start >> PAGE_SHIFT,
 			      obj->base.size >> PAGE_SHIFT);
 
 	obj->has_global_gtt_mapping = 0;
@@ -390,5 +544,165 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
 	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
 
 	/* ... but ensure that we clear the entire range. */
-	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+	i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+}
+
+static int setup_scratch_page(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct page *page;
+	dma_addr_t dma_addr;
+
+	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+	if (page == NULL)
+		return -ENOMEM;
+	get_page(page);
+	set_pages_uc(page, 1);
+
+#ifdef CONFIG_INTEL_IOMMU
+	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
+				PCI_DMA_BIDIRECTIONAL);
+	if (pci_dma_mapping_error(dev->pdev, dma_addr))
+		return -EINVAL;
+#else
+	dma_addr = page_to_phys(page);
+#endif
+	dev_priv->mm.gtt->scratch_page = page;
+	dev_priv->mm.gtt->scratch_page_dma = dma_addr;
+
+	return 0;
+}
+
+static void teardown_scratch_page(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
+	pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
+		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	put_page(dev_priv->mm.gtt->scratch_page);
+	__free_page(dev_priv->mm.gtt->scratch_page);
+}
+
+static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+	return snb_gmch_ctl << 20;
+}
+
+static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
+{
+	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
+	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
+	return snb_gmch_ctl << 25; /* 32 MB units */
+}
+
+static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
+{
+	static const int stolen_decoder[] = {
+		0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
+	snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
+	snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
+	return stolen_decoder[snb_gmch_ctl] << 20;
+}
+
+int i915_gem_gtt_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	phys_addr_t gtt_bus_addr;
+	u16 snb_gmch_ctl;
+	int ret;
+
+	/* On modern platforms we need not worry ourselves with the legacy
+	 * hostbridge query stuff. Skip it entirely.
+	 */
+	if (INTEL_INFO(dev)->gen < 6) {
+		ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
+		if (!ret) {
+			DRM_ERROR("failed to set up gmch\n");
+			return -EIO;
+		}
+
+		dev_priv->mm.gtt = intel_gtt_get();
+		if (!dev_priv->mm.gtt) {
+			DRM_ERROR("Failed to initialize GTT\n");
+			intel_gmch_remove();
+			return -ENODEV;
+		}
+		return 0;
+	}
+
+	dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
+	if (!dev_priv->mm.gtt)
+		return -ENOMEM;
+
+	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
+		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
+
+#ifdef CONFIG_INTEL_IOMMU
+	dev_priv->mm.gtt->needs_dmar = 1;
+#endif
+
+	/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
+	gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
+	dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
+
+	/* i9xx_setup */
+	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+	dev_priv->mm.gtt->gtt_total_entries =
+		gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
+	if (INTEL_INFO(dev)->gen < 7)
+		dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
+	else
+		dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
+
+	dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
+	/* 64/512MB is the current min/max we actually know of, but this is just a
+	 * coarse sanity check.
+	 */
+	if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
+	    dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
+		DRM_ERROR("Unknown GMADR entries (%d)\n",
+			  dev_priv->mm.gtt->gtt_mappable_entries);
+		ret = -ENXIO;
+		goto err_out;
+	}
+
+	ret = setup_scratch_page(dev);
+	if (ret) {
+		DRM_ERROR("Scratch setup failed\n");
+		goto err_out;
+	}
+
+	dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
+					   dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
+	if (!dev_priv->mm.gtt->gtt) {
+		DRM_ERROR("Failed to map the gtt page table\n");
+		teardown_scratch_page(dev);
+		ret = -ENOMEM;
+		goto err_out;
+	}
+
+	/* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
+	DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
+	DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
+	DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
+
+	return 0;
+
+err_out:
+	kfree(dev_priv->mm.gtt);
+	if (INTEL_INFO(dev)->gen < 6)
+		intel_gmch_remove();
+	return ret;
+}
+
+void i915_gem_gtt_fini(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	iounmap(dev_priv->mm.gtt->gtt);
+	teardown_scratch_page(dev);
+	if (INTEL_INFO(dev)->gen < 6)
+		intel_gmch_remove();
+	kfree(dev_priv->mm.gtt);
 }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 32e1bda865b8..a4dc97f8b9f0 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -122,7 +122,10 @@ static int
 i915_pipe_enabled(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
+
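+	/* PIPECONF is indexed by transcoder, which can differ from the
+	 * pipe on HSW (eDP transcoder).
+	 */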
+	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
 }
 
 /* Called from drm generic code, passed a 'crtc', which
@@ -182,6 +185,8 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 	int vbl_start, vbl_end, htotal, vtotal;
 	bool in_vbl = true;
 	int ret = 0;
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
 
 	if (!i915_pipe_enabled(dev, pipe)) {
 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
@@ -190,7 +195,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 	}
 
 	/* Get vtotal. */
-	vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
+	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
 
 	if (INTEL_INFO(dev)->gen >= 4) {
 		/* No obvious pixelcount register. Only query vertical
@@ -210,13 +215,13 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 		 */
 		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
 
-		htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
+		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
 		*vpos = position / htotal;
 		*hpos = position - (*vpos * htotal);
 	}
 
 	/* Query vblank area. */
-	vbl = I915_READ(VBLANK(pipe));
+	vbl = I915_READ(VBLANK(cpu_transcoder));
 
 	/* Test position against vblank region. */
 	vbl_start = vbl & 0x1fff;
@@ -352,8 +357,7 @@ static void notify_ring(struct drm_device *dev,
 	if (i915_enable_hangcheck) {
 		dev_priv->hangcheck_count = 0;
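+		/* Round up so the hangcheck timer can coalesce with other
+		 * wakeups instead of waking the CPU on its own.
+		 */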
 		mod_timer(&dev_priv->hangcheck_timer,
-			  jiffies +
-			  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
 	}
 }
 
@@ -374,7 +378,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
 		return;
 
-	mutex_lock(&dev_priv->dev->struct_mutex);
+	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
 		new_delay = dev_priv->rps.cur_delay + 1;
@@ -389,7 +393,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 		gen6_set_rps(dev_priv->dev, new_delay);
 	}
 
-	mutex_unlock(&dev_priv->dev->struct_mutex);
+	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
 
@@ -405,7 +409,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 static void ivybridge_parity_work(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-						    parity_error_work);
+						    l3_parity.error_work);
 	u32 error_status, row, bank, subbank;
 	char *parity_event[5];
 	uint32_t misccpctl;
@@ -469,7 +473,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
-	queue_work(dev_priv->wq, &dev_priv->parity_error_work);
+	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 }
 
 static void snb_gt_irq_handler(struct drm_device *dev,
@@ -520,7 +524,7 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
 	queue_work(dev_priv->wq, &dev_priv->rps.work);
 }
 
-static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -606,6 +610,9 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int pipe;
 
+	if (pch_iir & SDE_HOTPLUG_MASK)
+		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+
 	if (pch_iir & SDE_AUDIO_POWER_MASK)
 		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
 				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -646,6 +653,9 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int pipe;
 
+	if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+
 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
 		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
 				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -670,7 +680,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
 					 I915_READ(FDI_RX_IIR(pipe)));
 }
 
-static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -709,8 +719,6 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
 		if (de_iir & DE_PCH_EVENT_IVB) {
 			u32 pch_iir = I915_READ(SDEIIR);
 
-			if (pch_iir & SDE_HOTPLUG_MASK_CPT)
-				queue_work(dev_priv->wq, &dev_priv->hotplug_work);
 			cpt_irq_handler(dev, pch_iir);
 
 			/* clear PCH hotplug event before clear CPU irq */
@@ -745,13 +753,12 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
 		notify_ring(dev, &dev_priv->ring[VCS]);
 }
 
-static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int ret = IRQ_NONE;
 	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
-	u32 hotplug_mask;
 
 	atomic_inc(&dev_priv->irq_received);
 
@@ -769,11 +776,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
 	    (!IS_GEN6(dev) || pm_iir == 0))
 		goto done;
 
-	if (HAS_PCH_CPT(dev))
-		hotplug_mask = SDE_HOTPLUG_MASK_CPT;
-	else
-		hotplug_mask = SDE_HOTPLUG_MASK;
-
 	ret = IRQ_HANDLED;
 
 	if (IS_GEN5(dev))
@@ -802,8 +804,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
 
 	/* check event from PCH */
 	if (de_iir & DE_PCH_EVENT) {
-		if (pch_iir & hotplug_mask)
-			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
 		if (HAS_PCH_CPT(dev))
 			cpt_irq_handler(dev, pch_iir);
 		else
@@ -1120,6 +1120,8 @@ static void i915_record_ring_state(struct drm_device *dev,
 			= I915_READ(RING_SYNC_0(ring->mmio_base));
 		error->semaphore_mboxes[ring->id][1]
 			= I915_READ(RING_SYNC_1(ring->mmio_base));
+		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
+		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
 	}
 
 	if (INTEL_INFO(dev)->gen >= 4) {
@@ -1464,7 +1466,9 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
 	spin_lock_irqsave(&dev->event_lock, flags);
 	work = intel_crtc->unpin_work;
 
-	if (work == NULL || work->pending || !work->enable_stall_check) {
+	if (work == NULL ||
+	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
+	    !work->enable_stall_check) {
 		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		return;
@@ -1751,7 +1755,7 @@ void i915_hangcheck_elapsed(unsigned long data)
 repeat:
 	/* Reset timer in case the chip hangs without another request being added */
 	mod_timer(&dev_priv->hangcheck_timer,
-		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
 }
 
 /* drm_dma.h hooks
@@ -1956,6 +1960,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 	u32 enable_mask;
 	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
 	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+	u32 render_irqs;
 	u16 msid;
 
 	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@@ -1995,21 +2000,12 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 	I915_WRITE(VLV_IIR, 0xffffffff);
 	I915_WRITE(VLV_IIR, 0xffffffff);
 
-	dev_priv->gt_irq_mask = ~0;
-
-	I915_WRITE(GTIIR, I915_READ(GTIIR));
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-	I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
-		   GT_GEN6_BLT_CS_ERROR_INTERRUPT |
-		   GT_GEN6_BLT_USER_INTERRUPT |
-		   GT_GEN6_BSD_USER_INTERRUPT |
-		   GT_GEN6_BSD_CS_ERROR_INTERRUPT |
-		   GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
-		   GT_PIPE_NOTIFY |
-		   GT_RENDER_CS_ERROR_INTERRUPT |
-		   GT_SYNC_STATUS |
-		   GT_USER_INTERRUPT);
+
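+	/* Enable only the user interrupts for the render, BSD and blitter
+	 * rings.
+	 */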
+	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
+		GEN6_BLITTER_USER_INTERRUPT;
+	I915_WRITE(GTIER, render_irqs);
 	POSTING_READ(GTIER);
 
 	/* ack & enable invalid PTE error interrupts */
@@ -2019,7 +2015,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 #endif
 
 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
-#if 0 /* FIXME: check register definitions; some have moved */
 	/* Note HDMI and DP share bits */
 	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
 		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
@@ -2027,15 +2022,14 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
 	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
 		hotplug_en |= HDMID_HOTPLUG_INT_EN;
-	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
 		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
-	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
 		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
 	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
 		hotplug_en |= CRT_HOTPLUG_INT_EN;
 		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
 	}
-#endif
 
 	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
 
@@ -2129,7 +2123,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
 	return 0;
 }
 
-static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2307,7 +2301,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
 	return 0;
 }
 
-static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i915_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2545,7 +2539,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
 	return 0;
 }
 
-static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i965_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2691,7 +2685,7 @@ void intel_irq_init(struct drm_device *dev)
 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
 	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
-	INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
+	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a4162ddff6c5..3f75cfaf1c3f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,6 +26,7 @@
 #define _I915_REG_H_
 
 #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
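+/* Like _PIPE(), but indexed by CPU transcoder; pipe and transcoder are
+ * not always 1:1 once HSW adds the eDP transcoder.
+ */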
+#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
 
 #define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
 
@@ -40,6 +41,14 @@
  */
 #define INTEL_GMCH_CTRL		0x52
 #define INTEL_GMCH_VGA_DISABLE  (1 << 1)
+#define SNB_GMCH_CTRL		0x50
+#define    SNB_GMCH_GGMS_SHIFT	8 /* GTT Graphics Memory Size */
+#define    SNB_GMCH_GGMS_MASK	0x3
+#define    SNB_GMCH_GMS_SHIFT   3 /* Graphics Mode Select */
+#define    SNB_GMCH_GMS_MASK    0x1f
+#define    IVB_GMCH_GMS_SHIFT   4
+#define    IVB_GMCH_GMS_MASK    0xf
+
 
 /* PCI config space */
 
@@ -105,23 +114,6 @@
 #define  GEN6_GRDOM_MEDIA		(1 << 2)
 #define  GEN6_GRDOM_BLT			(1 << 3)
 
-/* PPGTT stuff */
-#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
-
-#define GEN6_PDE_VALID			(1 << 0)
-#define GEN6_PDE_LARGE_PAGE		(2 << 0) /* use 32kb pages */
-/* gen6+ has bit 11-4 for physical addr bit 39-32 */
-#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
-
-#define GEN6_PTE_VALID			(1 << 0)
-#define GEN6_PTE_UNCACHED		(1 << 1)
-#define HSW_PTE_UNCACHED		(0)
-#define GEN6_PTE_CACHE_LLC		(2 << 1)
-#define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
-#define GEN6_PTE_CACHE_BITS		(3 << 1)
-#define GEN6_PTE_GFDT			(1 << 3)
-#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
-
 #define RING_PP_DIR_BASE(ring)		((ring)->mmio_base+0x228)
 #define RING_PP_DIR_BASE_READ(ring)	((ring)->mmio_base+0x518)
 #define RING_PP_DIR_DCLV(ring)		((ring)->mmio_base+0x220)
@@ -241,11 +233,18 @@
  */
 #define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*x-1)
 #define MI_FLUSH_DW		MI_INSTR(0x26, 1) /* for GEN6 */
-#define   MI_INVALIDATE_TLB	(1<<18)
-#define   MI_INVALIDATE_BSD	(1<<7)
+#define   MI_FLUSH_DW_STORE_INDEX	(1<<21)
+#define   MI_INVALIDATE_TLB		(1<<18)
+#define   MI_FLUSH_DW_OP_STOREDW	(1<<14)
+#define   MI_INVALIDATE_BSD		(1<<7)
+#define   MI_FLUSH_DW_USE_GTT		(1<<2)
+#define   MI_FLUSH_DW_USE_PPGTT		(0<<2)
 #define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
-#define   MI_BATCH_NON_SECURE	(1)
-#define   MI_BATCH_NON_SECURE_I965 (1<<8)
+#define   MI_BATCH_NON_SECURE		(1)
+/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
+#define   MI_BATCH_NON_SECURE_I965	(1<<8)
+#define   MI_BATCH_PPGTT_HSW		(1<<8)
+#define   MI_BATCH_NON_SECURE_HSW	(1<<13)
 #define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
 #define   MI_BATCH_GTT		    (2<<6) /* aliased with (1<<7) on gen4 */
 #define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6+ */
@@ -369,6 +368,7 @@
 #define   DPIO_PLL_MODESEL_SHIFT	24 /* 3 bits */
 #define   DPIO_BIAS_CURRENT_CTL_SHIFT	21 /* 3 bits, always 0x7 */
 #define   DPIO_PLL_REFCLK_SEL_SHIFT	16 /* 2 bits */
+#define   DPIO_PLL_REFCLK_SEL_MASK	3
 #define   DPIO_DRIVER_CTL_SHIFT		12 /* always set to 0x8 */
 #define   DPIO_CLK_BIAS_CTL_SHIFT	8 /* always set to 0x5 */
 #define _DPIO_REFSFR_B			0x8034
@@ -384,6 +384,9 @@
 
 #define DPIO_FASTCLK_DISABLE		0x8100
 
+#define DPIO_DATA_CHANNEL1		0x8220
+#define DPIO_DATA_CHANNEL2		0x8420
+
 /*
  * Fence registers
  */
@@ -521,6 +524,7 @@
  */
 # define _3D_CHICKEN2_WM_READ_PIPELINED			(1 << 14)
 #define _3D_CHICKEN3	0x02090
+#define  _3D_CHICKEN_SF_DISABLE_OBJEND_CULL		(1 << 10)
 #define  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL		(1 << 5)
 
 #define MI_MODE		0x0209c
@@ -547,6 +551,8 @@
 #define IIR		0x020a4
 #define IMR		0x020a8
 #define ISR		0x020ac
+#define VLV_GUNIT_CLOCK_GATE	0x182060
+#define   GCFG_DIS		(1<<8)
 #define VLV_IIR_RW	0x182084
 #define VLV_IER		0x1820a0
 #define VLV_IIR		0x1820a4
@@ -661,6 +667,7 @@
 #define   MI_ARB_DISPLAY_PRIORITY_B_A		(1 << 0)	/* display B > display A */
 
 #define CACHE_MODE_0	0x02120 /* 915+ only */
+#define   CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
 #define   CM0_IZ_OPT_DISABLE      (1<<6)
 #define   CM0_ZR_OPT_DISABLE      (1<<5)
 #define	  CM0_STC_EVICT_DISABLE_LRA_SNB	(1<<5)
@@ -670,6 +677,8 @@
 #define   CM0_RC_OP_FLUSH_DISABLE (1<<0)
 #define BB_ADDR		0x02140 /* 8 bytes */
 #define GFX_FLSH_CNTL	0x02170 /* 915+ only */
+#define GFX_FLSH_CNTL_GEN6	0x101008
+#define   GFX_FLSH_CNTL_EN	(1<<0)
 #define ECOSKPD		0x021d0
 #define   ECO_GATING_CX_ONLY	(1<<3)
 #define   ECO_FLIP_DONE		(1<<0)
@@ -1559,14 +1568,14 @@
 #define _VSYNCSHIFT_B	0x61028
 
 
-#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
-#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
-#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
-#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B)
-#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
-#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
+#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
+#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
+#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
+#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B)
+#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B)
+#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
 #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
-#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
 
 /* VGA port control */
 #define ADPA			0x61100
@@ -2641,6 +2650,7 @@
 #define   PIPECONF_GAMMA		(1<<24)
 #define   PIPECONF_FORCE_BORDER	(1<<25)
 #define   PIPECONF_INTERLACE_MASK	(7 << 21)
+#define   PIPECONF_INTERLACE_MASK_HSW	(3 << 21)
 /* Note that pre-gen3 does not support interlaced display directly. Panel
  * fitting must be disabled on pre-ilk for interlaced. */
 #define   PIPECONF_PROGRESSIVE			(0 << 21)
@@ -2711,7 +2721,7 @@
 #define   PIPE_12BPC				(3 << 5)
 
 #define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
-#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
+#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
 #define PIPEDSL(pipe)  _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
 #define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
 #define PIPEFRAMEPIXEL(pipe)  _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
@@ -2998,12 +3008,19 @@
 #define   DISPPLANE_GAMMA_ENABLE		(1<<30)
 #define   DISPPLANE_GAMMA_DISABLE		0
 #define   DISPPLANE_PIXFORMAT_MASK		(0xf<<26)
+#define   DISPPLANE_YUV422			(0x0<<26)
 #define   DISPPLANE_8BPP			(0x2<<26)
-#define   DISPPLANE_15_16BPP			(0x4<<26)
-#define   DISPPLANE_16BPP			(0x5<<26)
-#define   DISPPLANE_32BPP_NO_ALPHA		(0x6<<26)
-#define   DISPPLANE_32BPP			(0x7<<26)
-#define   DISPPLANE_32BPP_30BIT_NO_ALPHA	(0xa<<26)
+#define   DISPPLANE_BGRA555			(0x3<<26)
+#define   DISPPLANE_BGRX555			(0x4<<26)
+#define   DISPPLANE_BGRX565			(0x5<<26)
+#define   DISPPLANE_BGRX888			(0x6<<26)
+#define   DISPPLANE_BGRA888			(0x7<<26)
+#define   DISPPLANE_RGBX101010			(0x8<<26)
+#define   DISPPLANE_RGBA101010			(0x9<<26)
+#define   DISPPLANE_BGRX101010			(0xa<<26)
+#define   DISPPLANE_RGBX161616			(0xc<<26)
+#define   DISPPLANE_RGBX888			(0xe<<26)
+#define   DISPPLANE_RGBA888			(0xf<<26)
 #define   DISPPLANE_STEREO_ENABLE		(1<<25)
 #define   DISPPLANE_STEREO_DISABLE		0
 #define   DISPPLANE_SEL_PIPE_SHIFT		24
@@ -3024,6 +3041,8 @@
 #define _DSPASIZE		0x70190
 #define _DSPASURF		0x7019C /* 965+ only */
 #define _DSPATILEOFF		0x701A4 /* 965+ only */
+#define _DSPAOFFSET		0x701A4 /* HSW */
+#define _DSPASURFLIVE		0x701AC
 
 #define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
 #define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
@@ -3033,6 +3052,8 @@
 #define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
 #define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
 #define DSPLINOFF(plane) DSPADDR(plane)
+#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
+#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)
 
 /* Display/Sprite base address macros */
 #define DISP_BASEADDR_MASK	(0xfffff000)
@@ -3078,6 +3099,8 @@
 #define _DSPBSIZE		0x71190
 #define _DSPBSURF		0x7119C
 #define _DSPBTILEOFF		0x711A4
+#define _DSPBOFFSET		0x711A4
+#define _DSPBSURFLIVE		0x711AC
 
 /* Sprite A control */
 #define _DVSACNTR		0x72180
@@ -3143,6 +3166,7 @@
 #define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
 #define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
 #define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
 
 #define _SPRA_CTL		0x70280
 #define   SPRITE_ENABLE			(1<<31)
@@ -3177,6 +3201,8 @@
 #define _SPRA_SURF		0x7029c
 #define _SPRA_KEYMAX		0x702a0
 #define _SPRA_TILEOFF		0x702a4
+#define _SPRA_OFFSET		0x702a4
+#define _SPRA_SURFLIVE		0x702ac
 #define _SPRA_SCALE		0x70304
 #define   SPRITE_SCALE_ENABLE	(1<<31)
 #define   SPRITE_FILTER_MASK	(3<<29)
@@ -3197,6 +3223,8 @@
 #define _SPRB_SURF		0x7129c
 #define _SPRB_KEYMAX		0x712a0
 #define _SPRB_TILEOFF		0x712a4
+#define _SPRB_OFFSET		0x712a4
+#define _SPRB_SURFLIVE		0x712ac
 #define _SPRB_SCALE		0x71304
 #define _SPRB_GAMC		0x71400
 
@@ -3210,8 +3238,10 @@
 #define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
 #define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
 #define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
 #define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
 #define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
 
 /* VBIOS regs */
 #define VGACNTRL		0x71400
@@ -3246,12 +3276,6 @@
 #define DISPLAY_PORT_PLL_BIOS_1         0x46010
 #define DISPLAY_PORT_PLL_BIOS_2         0x46014
 
-#define PCH_DSPCLK_GATE_D	0x42020
-# define DPFCUNIT_CLOCK_GATE_DISABLE		(1 << 9)
-# define DPFCRUNIT_CLOCK_GATE_DISABLE		(1 << 8)
-# define DPFDUNIT_CLOCK_GATE_DISABLE		(1 << 7)
-# define DPARBUNIT_CLOCK_GATE_DISABLE		(1 << 5)
-
 #define PCH_3DCGDIS0		0x46020
 # define MARIUNIT_CLOCK_GATE_DISABLE		(1 << 18)
 # define SVSMUNIT_CLOCK_GATE_DISABLE		(1 << 1)
@@ -3301,20 +3325,22 @@
 #define _PIPEB_LINK_M2           0x61048
 #define _PIPEB_LINK_N2           0x6104c
 
-#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
-#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
-#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
-#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
-#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
-#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
-#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
-#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
+#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
+#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
+#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
+#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
+#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
+#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
+#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
+#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
 
 /* CPU panel fitter */
 /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
 #define _PFA_CTL_1               0x68080
 #define _PFB_CTL_1               0x68880
 #define  PF_ENABLE              (1<<31)
+#define  PF_PIPE_SEL_MASK_IVB	(3<<29)
+#define  PF_PIPE_SEL_IVB(pipe)	((pipe)<<29)
 #define  PF_FILTER_MASK		(3<<23)
 #define  PF_FILTER_PROGRAMMED	(0<<23)
 #define  PF_FILTER_MED_3x3	(1<<23)
@@ -3423,15 +3449,13 @@
 #define  ILK_HDCP_DISABLE		(1<<25)
 #define  ILK_eDP_A_DISABLE		(1<<24)
 #define  ILK_DESKTOP			(1<<23)
-#define ILK_DSPCLK_GATE		0x42020
-#define  IVB_VRHUNIT_CLK_GATE	(1<<28)
-#define  ILK_DPARB_CLK_GATE	(1<<5)
-#define  ILK_DPFD_CLK_GATE	(1<<7)
 
-/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */
-#define   ILK_CLK_FBC		(1<<7)
-#define   ILK_DPFC_DIS1		(1<<8)
-#define   ILK_DPFC_DIS2		(1<<9)
+#define ILK_DSPCLK_GATE_D			0x42020
+#define   ILK_VRHUNIT_CLOCK_GATE_DISABLE	(1 << 28)
+#define   ILK_DPFCUNIT_CLOCK_GATE_DISABLE	(1 << 9)
+#define   ILK_DPFCRUNIT_CLOCK_GATE_DISABLE	(1 << 8)
+#define   ILK_DPFDUNIT_CLOCK_GATE_ENABLE	(1 << 7)
+#define   ILK_DPARBUNIT_CLOCK_GATE_ENABLE	(1 << 5)
 
 #define IVB_CHICKEN3	0x4200c
 # define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE	(1 << 5)
@@ -3447,14 +3471,21 @@
 
 #define GEN7_L3CNTLREG1				0xB01C
 #define  GEN7_WA_FOR_GEN7_L3_CONTROL			0x3C4FFF8C
+#define  GEN7_L3AGDIS				(1<<19)
 
 #define GEN7_L3_CHICKEN_MODE_REGISTER		0xB030
 #define  GEN7_WA_L3_CHICKEN_MODE				0x20000000
 
+#define GEN7_L3SQCREG4				0xb034
+#define  L3SQ_URB_READ_CAM_MATCH_DISABLE	(1<<27)
+
 /* WaCatErrorRejectionIssue */
 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG		0x9030
 #define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB	(1<<11)
 
+#define HSW_FUSE_STRAP		0x42014
+#define  HSW_CDCLK_LIMIT	(1 << 24)
+
 /* PCH */
 
 /* south display engine interrupt: IBX */
@@ -3686,7 +3717,7 @@
 #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
 #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
 
-#define VLV_VIDEO_DIP_CTL_A		0x60220
+#define VLV_VIDEO_DIP_CTL_A		0x60200
 #define VLV_VIDEO_DIP_DATA_A		0x60208
 #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A	0x60210
 
@@ -3795,18 +3826,26 @@
 #define  TRANS_6BPC             (2<<5)
 #define  TRANS_12BPC            (3<<5)
 
+#define _TRANSA_CHICKEN1	 0xf0060
+#define _TRANSB_CHICKEN1	 0xf1060
+#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define  TRANS_CHICKEN1_DP0UNIT_GC_DISABLE	(1<<4)
 #define _TRANSA_CHICKEN2	 0xf0064
 #define _TRANSB_CHICKEN2	 0xf1064
 #define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define   TRANS_AUTOTRAIN_GEN_STALL_DIS	(1<<31)
+#define  TRANS_CHICKEN2_TIMING_OVERRIDE		(1<<31)
 
 #define SOUTH_CHICKEN1		0xc2000
 #define  FDIA_PHASE_SYNC_SHIFT_OVR	19
 #define  FDIA_PHASE_SYNC_SHIFT_EN	18
-#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
-#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define  FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define  FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define  FDI_BC_BIFURCATION_SELECT	(1 << 12)
 #define SOUTH_CHICKEN2		0xc2004
-#define  DPLS_EDP_PPS_FIX_DIS	(1<<0)
+#define  FDI_MPHY_IOSFSB_RESET_STATUS	(1<<13)
+#define  FDI_MPHY_IOSFSB_RESET_CTL	(1<<12)
+#define  DPLS_EDP_PPS_FIX_DIS		(1<<0)
 
 #define _FDI_RXA_CHICKEN         0xc200c
 #define _FDI_RXB_CHICKEN         0xc2010
@@ -3816,6 +3855,7 @@
 
 #define SOUTH_DSPCLK_GATE_D	0xc2020
 #define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+#define  PCH_LP_PARTITION_LEVEL_DISABLE  (1<<12)
 
 /* CPU: FDI_TX */
 #define _FDI_TXA_CTL             0x60100
@@ -3877,6 +3917,7 @@
 #define  FDI_FS_ERRC_ENABLE		(1<<27)
 #define  FDI_FE_ERRC_ENABLE		(1<<26)
 #define  FDI_DP_PORT_WIDTH_X8           (7<<19)
+#define  FDI_RX_POLARITY_REVERSED_LPT	(1<<16)
 #define  FDI_8BPC                       (0<<16)
 #define  FDI_10BPC                      (1<<16)
 #define  FDI_6BPC                       (2<<16)
@@ -3901,16 +3942,21 @@
 #define  FDI_PORT_WIDTH_2X_LPT			(1<<19)
 #define  FDI_PORT_WIDTH_1X_LPT			(0<<19)
 
-#define _FDI_RXA_MISC            0xf0010
-#define _FDI_RXB_MISC            0xf1010
+#define _FDI_RXA_MISC			0xf0010
+#define _FDI_RXB_MISC			0xf1010
+#define  FDI_RX_PWRDN_LANE1_MASK	(3<<26)
+#define  FDI_RX_PWRDN_LANE1_VAL(x)	((x)<<26)
+#define  FDI_RX_PWRDN_LANE0_MASK	(3<<24)
+#define  FDI_RX_PWRDN_LANE0_VAL(x)	((x)<<24)
+#define  FDI_RX_TP1_TO_TP2_48		(2<<20)
+#define  FDI_RX_TP1_TO_TP2_64		(3<<20)
+#define  FDI_RX_FDI_DELAY_90		(0x90<<0)
+#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
+
 #define _FDI_RXA_TUSIZE1         0xf0030
 #define _FDI_RXA_TUSIZE2         0xf0038
 #define _FDI_RXB_TUSIZE1         0xf1030
 #define _FDI_RXB_TUSIZE2         0xf1038
-#define  FDI_RX_TP1_TO_TP2_48	(2<<20)
-#define  FDI_RX_TP1_TO_TP2_64	(3<<20)
-#define  FDI_RX_FDI_DELAY_90	(0x90<<0)
-#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
 #define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
 #define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
 
@@ -4003,6 +4049,11 @@
 #define  PANEL_LIGHT_ON_DELAY_SHIFT	0
 
 #define PCH_PP_OFF_DELAYS	0xc720c
+#define  PANEL_POWER_PORT_SELECT_MASK	(0x3 << 30)
+#define  PANEL_POWER_PORT_LVDS		(0 << 30)
+#define  PANEL_POWER_PORT_DP_A		(1 << 30)
+#define  PANEL_POWER_PORT_DP_C		(2 << 30)
+#define  PANEL_POWER_PORT_DP_D		(3 << 30)
 #define  PANEL_POWER_DOWN_DELAY_MASK	(0x1fff0000)
 #define  PANEL_POWER_DOWN_DELAY_SHIFT	16
 #define  PANEL_LIGHT_OFF_DELAY_MASK	(0x1fff)
@@ -4050,7 +4101,7 @@
 #define TRANS_DP_CTL_A		0xe0300
 #define TRANS_DP_CTL_B		0xe1300
 #define TRANS_DP_CTL_C		0xe2300
-#define TRANS_DP_CTL(pipe)	(TRANS_DP_CTL_A + (pipe) * 0x01000)
+#define TRANS_DP_CTL(pipe)	_PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B)
 #define  TRANS_DP_OUTPUT_ENABLE	(1<<31)
 #define  TRANS_DP_PORT_SEL_B	(0<<29)
 #define  TRANS_DP_PORT_SEL_C	(1<<29)
@@ -4108,6 +4159,8 @@
 #define  FORCEWAKE_ACK_HSW			0x130044
 #define  FORCEWAKE_ACK				0x130090
 #define  FORCEWAKE_MT				0xa188 /* multi-threaded */
+#define   FORCEWAKE_KERNEL			0x1
+#define   FORCEWAKE_USER			0x2
 #define  FORCEWAKE_MT_ACK			0x130040
 #define  ECOBUS					0xa180
 #define    FORCEWAKE_MT_ENABLE			(1<<5)
@@ -4220,6 +4273,10 @@
 #define   GEN6_READ_OC_PARAMS			0xc
 #define   GEN6_PCODE_WRITE_MIN_FREQ_TABLE	0x8
 #define   GEN6_PCODE_READ_MIN_FREQ_TABLE	0x9
+#define	  GEN6_PCODE_WRITE_RC6VIDS		0x4
+#define	  GEN6_PCODE_READ_RC6VIDS		0x5
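+/* RC6 VIDs map to voltage as mV = 245 + 5 * vid (vid 0 means off). */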
+#define   GEN6_ENCODE_RC6_VID(mv)		(((mv) - 245) / 5)
+#define   GEN6_DECODE_RC6_VID(vids)		(((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)
 #define GEN6_PCODE_DATA				0x138128
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT	8
 
@@ -4251,6 +4308,15 @@
 #define GEN7_L3LOG_BASE			0xB070
 #define GEN7_L3LOG_SIZE			0x80
 
+#define GEN7_HALF_SLICE_CHICKEN1	0xe100 /* IVB GT1 + VLV */
+#define GEN7_HALF_SLICE_CHICKEN1_GT2	0xf100
+#define   GEN7_MAX_PS_THREAD_DEP		(8<<12)
+#define   GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE	(1<<3)
+
+#define GEN7_ROW_CHICKEN2		0xe4f4
+#define GEN7_ROW_CHICKEN2_GT2		0xf4f4
+#define   DOP_CLOCK_GATING_DISABLE	(1<<0)
+
 #define G4X_AUD_VID_DID			0x62020
 #define INTEL_AUDIO_DEVCL		0x808629FB
 #define INTEL_AUDIO_DEVBLC		0x80862801
@@ -4380,33 +4446,39 @@
 #define HSW_PWR_WELL_CTL6			0x45414
 
 /* Per-pipe DDI Function Control */
-#define PIPE_DDI_FUNC_CTL_A		0x60400
-#define PIPE_DDI_FUNC_CTL_B		0x61400
-#define PIPE_DDI_FUNC_CTL_C		0x62400
-#define PIPE_DDI_FUNC_CTL_EDP		0x6F400
-#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
-				       PIPE_DDI_FUNC_CTL_B)
-#define  PIPE_DDI_FUNC_ENABLE		(1<<31)
+#define TRANS_DDI_FUNC_CTL_A		0x60400
+#define TRANS_DDI_FUNC_CTL_B		0x61400
+#define TRANS_DDI_FUNC_CTL_C		0x62400
+#define TRANS_DDI_FUNC_CTL_EDP		0x6F400
+#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
+						   TRANS_DDI_FUNC_CTL_B)
+#define  TRANS_DDI_FUNC_ENABLE		(1<<31)
 /* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define  PIPE_DDI_PORT_MASK		(7<<28)
-#define  PIPE_DDI_SELECT_PORT(x)	((x)<<28)
-#define  PIPE_DDI_MODE_SELECT_MASK	(7<<24)
-#define  PIPE_DDI_MODE_SELECT_HDMI	(0<<24)
-#define  PIPE_DDI_MODE_SELECT_DVI	(1<<24)
-#define  PIPE_DDI_MODE_SELECT_DP_SST	(2<<24)
-#define  PIPE_DDI_MODE_SELECT_DP_MST	(3<<24)
-#define  PIPE_DDI_MODE_SELECT_FDI	(4<<24)
-#define  PIPE_DDI_BPC_MASK		(7<<20)
-#define  PIPE_DDI_BPC_8			(0<<20)
-#define  PIPE_DDI_BPC_10		(1<<20)
-#define  PIPE_DDI_BPC_6			(2<<20)
-#define  PIPE_DDI_BPC_12		(3<<20)
-#define  PIPE_DDI_PVSYNC		(1<<17)
-#define  PIPE_DDI_PHSYNC		(1<<16)
-#define  PIPE_DDI_BFI_ENABLE		(1<<4)
-#define  PIPE_DDI_PORT_WIDTH_X1		(0<<1)
-#define  PIPE_DDI_PORT_WIDTH_X2		(1<<1)
-#define  PIPE_DDI_PORT_WIDTH_X4		(3<<1)
+#define  TRANS_DDI_PORT_MASK		(7<<28)
+#define  TRANS_DDI_SELECT_PORT(x)	((x)<<28)
+#define  TRANS_DDI_PORT_NONE		(0<<28)
+#define  TRANS_DDI_MODE_SELECT_MASK	(7<<24)
+#define  TRANS_DDI_MODE_SELECT_HDMI	(0<<24)
+#define  TRANS_DDI_MODE_SELECT_DVI	(1<<24)
+#define  TRANS_DDI_MODE_SELECT_DP_SST	(2<<24)
+#define  TRANS_DDI_MODE_SELECT_DP_MST	(3<<24)
+#define  TRANS_DDI_MODE_SELECT_FDI	(4<<24)
+#define  TRANS_DDI_BPC_MASK		(7<<20)
+#define  TRANS_DDI_BPC_8		(0<<20)
+#define  TRANS_DDI_BPC_10		(1<<20)
+#define  TRANS_DDI_BPC_6		(2<<20)
+#define  TRANS_DDI_BPC_12		(3<<20)
+#define  TRANS_DDI_PVSYNC		(1<<17)
+#define  TRANS_DDI_PHSYNC		(1<<16)
+#define  TRANS_DDI_EDP_INPUT_MASK	(7<<12)
+#define  TRANS_DDI_EDP_INPUT_A_ON	(0<<12)
+#define  TRANS_DDI_EDP_INPUT_A_ONOFF	(4<<12)
+#define  TRANS_DDI_EDP_INPUT_B_ONOFF	(5<<12)
+#define  TRANS_DDI_EDP_INPUT_C_ONOFF	(6<<12)
+#define  TRANS_DDI_BFI_ENABLE		(1<<4)
+#define  TRANS_DDI_PORT_WIDTH_X1	(0<<1)
+#define  TRANS_DDI_PORT_WIDTH_X2	(1<<1)
+#define  TRANS_DDI_PORT_WIDTH_X4	(3<<1)
 
 /* DisplayPort Transport Control */
 #define DP_TP_CTL_A			0x64040
@@ -4420,12 +4492,16 @@
 #define  DP_TP_CTL_LINK_TRAIN_MASK		(7<<8)
 #define  DP_TP_CTL_LINK_TRAIN_PAT1		(0<<8)
 #define  DP_TP_CTL_LINK_TRAIN_PAT2		(1<<8)
+#define  DP_TP_CTL_LINK_TRAIN_PAT3		(4<<8)
+#define  DP_TP_CTL_LINK_TRAIN_IDLE		(2<<8)
 #define  DP_TP_CTL_LINK_TRAIN_NORMAL		(3<<8)
+#define  DP_TP_CTL_SCRAMBLE_DISABLE		(1<<7)
 
 /* DisplayPort Transport Status */
 #define DP_TP_STATUS_A			0x64044
 #define DP_TP_STATUS_B			0x64144
 #define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
+#define  DP_TP_STATUS_IDLE_DONE		(1<<25)
 #define  DP_TP_STATUS_AUTOTRAIN_DONE	(1<<12)
 
 /* DDI Buffer Control */
@@ -4444,6 +4520,7 @@
 #define  DDI_BUF_EMP_800MV_3_5DB_HSW		(8<<24)   /* Sel8 */
 #define  DDI_BUF_EMP_MASK			(0xf<<24)
 #define  DDI_BUF_IS_IDLE			(1<<7)
+#define  DDI_A_4_LANES				(1<<4)
 #define  DDI_PORT_WIDTH_X1			(0<<1)
 #define  DDI_PORT_WIDTH_X2			(1<<1)
 #define  DDI_PORT_WIDTH_X4			(3<<1)
@@ -4460,6 +4537,10 @@
 #define SBI_ADDR			0xC6000
 #define SBI_DATA			0xC6004
 #define SBI_CTL_STAT			0xC6008
+#define  SBI_CTL_DEST_ICLK		(0x0<<16)
+#define  SBI_CTL_DEST_MPHY		(0x1<<16)
+#define  SBI_CTL_OP_IORD		(0x2<<8)
+#define  SBI_CTL_OP_IOWR		(0x3<<8)
 #define  SBI_CTL_OP_CRRD		(0x6<<8)
 #define  SBI_CTL_OP_CRWR		(0x7<<8)
 #define  SBI_RESPONSE_FAIL		(0x1<<1)
@@ -4477,10 +4558,12 @@
 #define   SBI_SSCDIVINTPHASE_PROPAGATE		(1<<0)
 #define  SBI_SSCCTL				0x020c
 #define  SBI_SSCCTL6				0x060C
+#define   SBI_SSCCTL_PATHALT			(1<<3)
 #define   SBI_SSCCTL_DISABLE			(1<<0)
 #define  SBI_SSCAUXDIV6				0x0610
 #define   SBI_SSCAUXDIV_FINALDIV2SEL(x)		((x)<<4)
 #define  SBI_DBUFF0				0x2a00
+#define   SBI_DBUFF0_ENABLE			(1<<0)
 
 /* LPT PIXCLK_GATE */
 #define PIXCLK_GATE			0xC6020
@@ -4490,8 +4573,8 @@
 /* SPLL */
 #define SPLL_CTL			0x46020
 #define  SPLL_PLL_ENABLE		(1<<31)
-#define  SPLL_PLL_SCC			(1<<28)
-#define  SPLL_PLL_NON_SCC		(2<<28)
+#define  SPLL_PLL_SSC			(1<<28)
+#define  SPLL_PLL_NON_SSC		(2<<28)
 #define  SPLL_PLL_FREQ_810MHz		(0<<26)
 #define  SPLL_PLL_FREQ_1350MHz		(1<<26)
 
@@ -4500,7 +4583,7 @@
 #define WRPLL_CTL2			0x46060
 #define  WRPLL_PLL_ENABLE		(1<<31)
 #define  WRPLL_PLL_SELECT_SSC		(0x01<<28)
-#define  WRPLL_PLL_SELECT_NON_SCC	(0x02<<28)
+#define  WRPLL_PLL_SELECT_NON_SSC	(0x02<<28)
 #define  WRPLL_PLL_SELECT_LCPLL_2700	(0x03<<28)
 /* WRPLL divider programming */
 #define  WRPLL_DIVIDER_REFERENCE(x)	((x)<<0)
@@ -4517,21 +4600,36 @@
 #define  PORT_CLK_SEL_SPLL		(3<<29)
 #define  PORT_CLK_SEL_WRPLL1		(4<<29)
 #define  PORT_CLK_SEL_WRPLL2		(5<<29)
+#define  PORT_CLK_SEL_NONE		(7<<29)
 
-/* Pipe clock selection */
-#define PIPE_CLK_SEL_A			0x46140
-#define PIPE_CLK_SEL_B			0x46144
-#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
-/* For each pipe, we need to select the corresponding port clock */
-#define  PIPE_CLK_SEL_DISABLED		(0x0<<29)
-#define  PIPE_CLK_SEL_PORT(x)		((x+1)<<29)
+/* Transcoder clock selection */
+#define TRANS_CLK_SEL_A			0x46140
+#define TRANS_CLK_SEL_B			0x46144
+#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
+/* For each transcoder, we need to select the corresponding port clock */
+#define  TRANS_CLK_SEL_DISABLED		(0x0<<29)
+#define  TRANS_CLK_SEL_PORT(x)		(((x)+1)<<29)
+
+#define _TRANSA_MSA_MISC		0x60410
+#define _TRANSB_MSA_MISC		0x61410
+#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
+					       _TRANSB_MSA_MISC)
+#define  TRANS_MSA_SYNC_CLK		(1<<0)
+#define  TRANS_MSA_6_BPC		(0<<5)
+#define  TRANS_MSA_8_BPC		(1<<5)
+#define  TRANS_MSA_10_BPC		(2<<5)
+#define  TRANS_MSA_12_BPC		(3<<5)
+#define  TRANS_MSA_16_BPC		(4<<5)
 
 /* LCPLL Control */
 #define LCPLL_CTL			0x130040
 #define  LCPLL_PLL_DISABLE		(1<<31)
 #define  LCPLL_PLL_LOCK			(1<<30)
+#define  LCPLL_CLK_FREQ_MASK		(3<<26)
+#define  LCPLL_CLK_FREQ_450		(0<<26)
 #define  LCPLL_CD_CLOCK_DISABLE		(1<<25)
 #define  LCPLL_CD2X_CLOCK_DISABLE	(1<<23)
+#define  LCPLL_CD_SOURCE_FCLK		(1<<21)
 
 /* Pipe WM_LINETIME - watermark line time */
 #define PIPE_WM_LINETIME_A		0x45270
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5854bddb1e9f..63d4d30c39de 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -60,9 +60,9 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
 		reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
 
 	if (pipe == PIPE_A)
-		array = dev_priv->save_palette_a;
+		array = dev_priv->regfile.save_palette_a;
 	else
-		array = dev_priv->save_palette_b;
+		array = dev_priv->regfile.save_palette_b;
 
 	for (i = 0; i < 256; i++)
 		array[i] = I915_READ(reg + (i << 2));
@@ -82,9 +82,9 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
 		reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
 
 	if (pipe == PIPE_A)
-		array = dev_priv->save_palette_a;
+		array = dev_priv->regfile.save_palette_a;
 	else
-		array = dev_priv->save_palette_b;
+		array = dev_priv->regfile.save_palette_b;
 
 	for (i = 0; i < 256; i++)
 		I915_WRITE(reg + (i << 2), array[i]);
@@ -131,11 +131,11 @@ static void i915_save_vga(struct drm_device *dev)
 	u16 cr_index, cr_data, st01;
 
 	/* VGA color palette registers */
-	dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
+	dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
 
 	/* MSR bits */
-	dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
-	if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+	dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
+	if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
 		cr_index = VGA_CR_INDEX_CGA;
 		cr_data = VGA_CR_DATA_CGA;
 		st01 = VGA_ST01_CGA;
@@ -150,35 +150,35 @@ static void i915_save_vga(struct drm_device *dev)
 			   i915_read_indexed(dev, cr_index, cr_data, 0x11) &
 			   (~0x80));
 	for (i = 0; i <= 0x24; i++)
-		dev_priv->saveCR[i] =
+		dev_priv->regfile.saveCR[i] =
 			i915_read_indexed(dev, cr_index, cr_data, i);
 	/* Make sure we don't turn off CR group 0 writes */
-	dev_priv->saveCR[0x11] &= ~0x80;
+	dev_priv->regfile.saveCR[0x11] &= ~0x80;
 
 	/* Attribute controller registers */
 	I915_READ8(st01);
-	dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
+	dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
 	for (i = 0; i <= 0x14; i++)
-		dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
+		dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
 	I915_READ8(st01);
-	I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
+	I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
 	I915_READ8(st01);
 
 	/* Graphics controller registers */
 	for (i = 0; i < 9; i++)
-		dev_priv->saveGR[i] =
+		dev_priv->regfile.saveGR[i] =
 			i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
 
-	dev_priv->saveGR[0x10] =
+	dev_priv->regfile.saveGR[0x10] =
 		i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
-	dev_priv->saveGR[0x11] =
+	dev_priv->regfile.saveGR[0x11] =
 		i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
-	dev_priv->saveGR[0x18] =
+	dev_priv->regfile.saveGR[0x18] =
 		i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
 
 	/* Sequencer registers */
 	for (i = 0; i < 8; i++)
-		dev_priv->saveSR[i] =
+		dev_priv->regfile.saveSR[i] =
 			i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
 }
 
@@ -189,8 +189,8 @@ static void i915_restore_vga(struct drm_device *dev)
 	u16 cr_index, cr_data, st01;
 
 	/* MSR bits */
-	I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
-	if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+	I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
+	if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
 		cr_index = VGA_CR_INDEX_CGA;
 		cr_data = VGA_CR_DATA_CGA;
 		st01 = VGA_ST01_CGA;
@@ -203,36 +203,36 @@ static void i915_restore_vga(struct drm_device *dev)
 	/* Sequencer registers, don't write SR07 */
 	for (i = 0; i < 7; i++)
 		i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
-				   dev_priv->saveSR[i]);
+				   dev_priv->regfile.saveSR[i]);
 
 	/* CRT controller regs */
 	/* Enable CR group 0 writes */
-	i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
+	i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
 	for (i = 0; i <= 0x24; i++)
-		i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
+		i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
 
 	/* Graphics controller regs */
 	for (i = 0; i < 9; i++)
 		i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
-				   dev_priv->saveGR[i]);
+				   dev_priv->regfile.saveGR[i]);
 
 	i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
-			   dev_priv->saveGR[0x10]);
+			   dev_priv->regfile.saveGR[0x10]);
 	i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
-			   dev_priv->saveGR[0x11]);
+			   dev_priv->regfile.saveGR[0x11]);
 	i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
-			   dev_priv->saveGR[0x18]);
+			   dev_priv->regfile.saveGR[0x18]);
 
 	/* Attribute controller registers */
 	I915_READ8(st01); /* switch back to index mode */
 	for (i = 0; i <= 0x14; i++)
-		i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
+		i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
 	I915_READ8(st01); /* switch back to index mode */
-	I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
+	I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
 	I915_READ8(st01);
 
 	/* VGA color palette registers */
-	I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
+	I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
 }
 
 static void i915_save_modeset_reg(struct drm_device *dev)
@@ -244,156 +244,162 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 		return;
 
 	/* Cursor state */
-	dev_priv->saveCURACNTR = I915_READ(_CURACNTR);
-	dev_priv->saveCURAPOS = I915_READ(_CURAPOS);
-	dev_priv->saveCURABASE = I915_READ(_CURABASE);
-	dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR);
-	dev_priv->saveCURBPOS = I915_READ(_CURBPOS);
-	dev_priv->saveCURBBASE = I915_READ(_CURBBASE);
+	dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
+	dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
+	dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
+	dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
+	dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
+	dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
 	if (IS_GEN2(dev))
-		dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+		dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
 
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
-		dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
+		dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
+		dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
 	}
 
 	/* Pipe & plane A info */
-	dev_priv->savePIPEACONF = I915_READ(_PIPEACONF);
-	dev_priv->savePIPEASRC = I915_READ(_PIPEASRC);
+	dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
+	dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->saveFPA0 = I915_READ(_PCH_FPA0);
-		dev_priv->saveFPA1 = I915_READ(_PCH_FPA1);
-		dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A);
+		dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
+		dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
+		dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
 	} else {
-		dev_priv->saveFPA0 = I915_READ(_FPA0);
-		dev_priv->saveFPA1 = I915_READ(_FPA1);
-		dev_priv->saveDPLL_A = I915_READ(_DPLL_A);
+		dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
+		dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
+		dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
 	}
 	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
-		dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
-	dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A);
-	dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A);
-	dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A);
-	dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A);
-	dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A);
-	dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A);
+		dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+	dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
+	dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
+	dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
+	dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
+	dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
+	dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
 	if (!HAS_PCH_SPLIT(dev))
-		dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
+		dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
 
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
-		dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
-		dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
-		dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
+		dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+		dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+		dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+		dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
 
-		dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
-		dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
+		dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+		dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
 
-		dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
-		dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
-		dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
+		dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+		dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+		dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
 
-		dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF);
-		dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
-		dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
-		dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
-		dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
-		dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
-		dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
+		dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
+		dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
+		dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
+		dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
+		dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
+		dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
+		dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
 	}
 
-	dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR);
-	dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
-	dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE);
-	dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS);
-	dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR);
+	dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
+	dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+	dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
+	dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
+	dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		dev_priv->saveDSPASURF = I915_READ(_DSPASURF);
-		dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
+		dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
+		dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
 	}
 	i915_save_palette(dev, PIPE_A);
-	dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT);
+	dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
 
 	/* Pipe & plane B info */
-	dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF);
-	dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC);
+	dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
+	dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->saveFPB0 = I915_READ(_PCH_FPB0);
-		dev_priv->saveFPB1 = I915_READ(_PCH_FPB1);
-		dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B);
+		dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
+		dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
+		dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
 	} else {
-		dev_priv->saveFPB0 = I915_READ(_FPB0);
-		dev_priv->saveFPB1 = I915_READ(_FPB1);
-		dev_priv->saveDPLL_B = I915_READ(_DPLL_B);
+		dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
+		dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
+		dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
 	}
 	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
-		dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
-	dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B);
-	dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B);
-	dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B);
-	dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B);
-	dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B);
-	dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B);
+		dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+	dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
+	dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
+	dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
+	dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
+	dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
+	dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
 	if (!HAS_PCH_SPLIT(dev))
-		dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
+		dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
 
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
-		dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
-		dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
-		dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
+		dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+		dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+		dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+		dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
 
-		dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
-		dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
+		dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+		dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
 
-		dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
-		dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
-		dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
+		dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+		dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+		dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
 
-		dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF);
-		dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
-		dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
-		dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
-		dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
-		dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
-		dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
+		dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
+		dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
+		dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
+		dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
+		dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
+		dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
+		dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
 	}
 
-	dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR);
-	dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
-	dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE);
-	dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS);
-	dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR);
+	dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
+	dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+	dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
+	dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
+	dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF);
-		dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
+		dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
+		dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
 	}
 	i915_save_palette(dev, PIPE_B);
-	dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT);
+	dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
 
 	/* Fences */
 	switch (INTEL_INFO(dev)->gen) {
 	case 7:
 	case 6:
 		for (i = 0; i < 16; i++)
-			dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+			dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
 		break;
 	case 5:
 	case 4:
 		for (i = 0; i < 16; i++)
-			dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+			dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
 		break;
 	case 3:
 		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
 			for (i = 0; i < 8; i++)
-				dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+				dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
 	case 2:
 		for (i = 0; i < 8; i++)
-			dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+			dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
 		break;
 	}
 
+	/* CRT state */
+	if (HAS_PCH_SPLIT(dev))
+		dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
+	else
+		dev_priv->regfile.saveADPA = I915_READ(ADPA);
+
 	return;
 }
 
@@ -412,20 +418,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	case 7:
 	case 6:
 		for (i = 0; i < 16; i++)
-			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
 		break;
 	case 5:
 	case 4:
 		for (i = 0; i < 16; i++)
-			I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
+			I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
 		break;
 	case 3:
 	case 2:
 		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
 			for (i = 0; i < 8; i++)
-				I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+				I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
 		for (i = 0; i < 8; i++)
-			I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+			I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
 		break;
 	}
 
@@ -447,158 +453,164 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	}
 
 	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
-		I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
+		I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
+		I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
 	}
 
 	/* Pipe & plane A info */
 	/* Prime the clock */
-	if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
-		I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
+	if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
+		I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
 			   ~DPLL_VCO_ENABLE);
 		POSTING_READ(dpll_a_reg);
 		udelay(150);
 	}
-	I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
-	I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
+	I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
+	I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
 	/* Actually enable it */
-	I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
+	I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
 	POSTING_READ(dpll_a_reg);
 	udelay(150);
 	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+		I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
 		POSTING_READ(_DPLL_A_MD);
 	}
 	udelay(150);
 
 	/* Restore mode */
-	I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A);
-	I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A);
-	I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A);
-	I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A);
-	I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A);
-	I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A);
+	I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
+	I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
+	I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
+	I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
+	I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
+	I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
 	if (!HAS_PCH_SPLIT(dev))
-		I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+		I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
 
 	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
-		I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
-		I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
-		I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
+		I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
+		I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
+		I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
+		I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
 
-		I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
-		I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
+		I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
+		I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
 
-		I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1);
-		I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
-		I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
+		I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
+		I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
+		I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
 
-		I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF);
-		I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
-		I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
-		I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
-		I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
-		I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
-		I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
+		I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
+		I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
+		I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
+		I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
+		I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
+		I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
+		I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
 	}
 
 	/* Restore plane info */
-	I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE);
-	I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS);
-	I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC);
-	I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR);
-	I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+	I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
+	I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
+	I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
+	I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
+	I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF);
-		I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+		I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
+		I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
 	}
 
-	I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF);
+	I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
 
 	i915_restore_palette(dev, PIPE_A);
 	/* Enable the plane */
-	I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR);
+	I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
 	I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
 
 	/* Pipe & plane B info */
-	if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
-		I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
+	if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
+		I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
 			   ~DPLL_VCO_ENABLE);
 		POSTING_READ(dpll_b_reg);
 		udelay(150);
 	}
-	I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
-	I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
+	I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
+	I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
 	/* Actually enable it */
-	I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
+	I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
 	POSTING_READ(dpll_b_reg);
 	udelay(150);
 	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+		I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
 		POSTING_READ(_DPLL_B_MD);
 	}
 	udelay(150);
 
 	/* Restore mode */
-	I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B);
-	I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B);
-	I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B);
-	I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B);
-	I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B);
-	I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B);
+	I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
+	I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
+	I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
+	I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
+	I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
+	I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
 	if (!HAS_PCH_SPLIT(dev))
-		I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+		I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
 
 	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
-		I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
-		I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
-		I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
+		I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
+		I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
+		I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
+		I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
 
-		I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
-		I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
+		I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
+		I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
 
-		I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1);
-		I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
-		I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
+		I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
+		I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
+		I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
 
-		I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF);
-		I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
-		I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
-		I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
-		I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
-		I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
-		I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
+		I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
+		I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
+		I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
+		I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
+		I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
+		I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
+		I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
 	}
 
 	/* Restore plane info */
-	I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE);
-	I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS);
-	I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC);
-	I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR);
-	I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+	I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
+	I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
+	I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
+	I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
+	I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF);
-		I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+		I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
+		I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
 	}
 
-	I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF);
+	I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
 
 	i915_restore_palette(dev, PIPE_B);
 	/* Enable the plane */
-	I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR);
+	I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
 	I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
 
 	/* Cursor state */
-	I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS);
-	I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR);
-	I915_WRITE(_CURABASE, dev_priv->saveCURABASE);
-	I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS);
-	I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR);
-	I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE);
+	I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
+	I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
+	I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
+	I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
+	I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
+	I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
 	if (IS_GEN2(dev))
-		I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+		I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
+
+	/* CRT state */
+	if (HAS_PCH_SPLIT(dev))
+		I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
+	else
+		I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
 
 	return;
 }
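
The restore path above primes each DPLL by writing the saved value with the VCO enable bit cleared, waiting for the PLL to warm up, and only then writing the full value. The ordering, condensed into a sketch that reuses the driver's own macros:

    /* Sketch: two-step DPLL restore. Program everything except the
     * enable bit, post the write, give the PLL ~150us to settle, then
     * set the enable bit; the real code also writes the FP0/FP1
     * dividers between the two steps. */
    static void restore_dpll(struct drm_i915_private *dev_priv,
                             u32 dpll_reg, u32 saved)
    {
            if (saved & DPLL_VCO_ENABLE) {
                    I915_WRITE(dpll_reg, saved & ~DPLL_VCO_ENABLE);
                    POSTING_READ(dpll_reg);
                    udelay(150);
            }
            I915_WRITE(dpll_reg, saved);
            POSTING_READ(dpll_reg);
            udelay(150);
    }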
@@ -608,89 +620,84 @@ static void i915_save_display(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* Display arbitration control */
-	dev_priv->saveDSPARB = I915_READ(DSPARB);
+	dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
 
 	/* This is only meaningful in non-KMS mode */
 	/* Don't save them in KMS mode */
 	i915_save_modeset_reg(dev);
 
-	/* CRT state */
-	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->saveADPA = I915_READ(PCH_ADPA);
-	} else {
-		dev_priv->saveADPA = I915_READ(ADPA);
-	}
-
 	/* LVDS state */
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
-		dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
-		dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
-		dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
-		dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
-		dev_priv->saveLVDS = I915_READ(PCH_LVDS);
+		dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
+		dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+		dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+		dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
+		dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
+		dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
 	} else {
-		dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
-		dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
-		dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
-		dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
+		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
+		dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+		dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+		dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
 		if (INTEL_INFO(dev)->gen >= 4)
-			dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+			dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
 		if (IS_MOBILE(dev) && !IS_I830(dev))
-			dev_priv->saveLVDS = I915_READ(LVDS);
+			dev_priv->regfile.saveLVDS = I915_READ(LVDS);
 	}
 
 	if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
-		dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+		dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
 
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
-		dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
-		dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
+		dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
+		dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
+		dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
 	} else {
-		dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
-		dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
-		dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
+		dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
+		dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
+		dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
 	}
 
-	/* Display Port state */
-	if (SUPPORTS_INTEGRATED_DP(dev)) {
-		dev_priv->saveDP_B = I915_READ(DP_B);
-		dev_priv->saveDP_C = I915_READ(DP_C);
-		dev_priv->saveDP_D = I915_READ(DP_D);
-		dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
-		dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
-		dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
-		dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
-		dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
-		dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
-		dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
-		dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
+	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+		/* Display Port state */
+		if (SUPPORTS_INTEGRATED_DP(dev)) {
+			dev_priv->regfile.saveDP_B = I915_READ(DP_B);
+			dev_priv->regfile.saveDP_C = I915_READ(DP_C);
+			dev_priv->regfile.saveDP_D = I915_READ(DP_D);
+			dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
+			dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
+			dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
+			dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
+			dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
+			dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
+			dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
+			dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
+		}
+		/* FIXME: save TV & SDVO state */
 	}
-	/* FIXME: save TV & SDVO state */
 
 	/* Only save FBC state on the platform that supports FBC */
 	if (I915_HAS_FBC(dev)) {
 		if (HAS_PCH_SPLIT(dev)) {
-			dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
+			dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
 		} else if (IS_GM45(dev)) {
-			dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+			dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
 		} else {
-			dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
-			dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
-			dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
-			dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+			dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+			dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+			dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+			dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
 		}
 	}
 
 	/* VGA state */
-	dev_priv->saveVGA0 = I915_READ(VGA0);
-	dev_priv->saveVGA1 = I915_READ(VGA1);
-	dev_priv->saveVGA_PD = I915_READ(VGA_PD);
+	dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
+	dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
+	dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
 	if (HAS_PCH_SPLIT(dev))
-		dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
+		dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL);
 	else
-		dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+		dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL);
 
 	i915_save_vga(dev);
 }
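
Several of the blocks above become conditional on UMS, since under KMS the modeset code reprograms those registers itself on resume. The guard shape used throughout this file:

    /* Sketch: skip UMS-only register state entirely when kernel
     * modesetting owns the display hardware. */
    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
            /* Display Port, interrupt, TV/SDVO state: UMS only */
    }
    /* state that matters in both modes stays unconditional */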
@@ -700,97 +707,95 @@ static void i915_restore_display(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* Display arbitration */
-	I915_WRITE(DSPARB, dev_priv->saveDSPARB);
+	I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
 
-	/* Display port ratios (must be done before clock is set) */
-	if (SUPPORTS_INTEGRATED_DP(dev)) {
-		I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
-		I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
-		I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
-		I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
-		I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
-		I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
-		I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
-		I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
+	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+		/* Display port ratios (must be done before clock is set) */
+		if (SUPPORTS_INTEGRATED_DP(dev)) {
+			I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
+			I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
+			I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
+			I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
+			I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
+			I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
+			I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
+			I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
+		}
 	}
 
 	/* This is only meaningful in non-KMS mode */
 	/* Don't restore them in KMS mode */
 	i915_restore_modeset_reg(dev);
 
-	/* CRT state */
-	if (HAS_PCH_SPLIT(dev))
-		I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
-	else
-		I915_WRITE(ADPA, dev_priv->saveADPA);
-
 	/* LVDS state */
 	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
-		I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
+		I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
 
 	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
+		I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS);
 	} else if (IS_MOBILE(dev) && !IS_I830(dev))
-		I915_WRITE(LVDS, dev_priv->saveLVDS);
+		I915_WRITE(LVDS, dev_priv->regfile.saveLVDS);
 
 	if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
-		I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
+		I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
 
 	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
-		I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
+		I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
+		I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
 		/* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
 		 * otherwise we get blank eDP screen after S3 on some machines
 		 */
-		I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
-		I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
-		I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
-		I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
-		I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
-		I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
+		I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
+		I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
+		I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+		I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+		I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+		I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
 		I915_WRITE(RSTDBYCTL,
-			   dev_priv->saveMCHBAR_RENDER_STANDBY);
+			   dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
 	} else {
-		I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
-		I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
-		I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL);
-		I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
-		I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
-		I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
-		I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+		I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
+		I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+		I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+		I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+		I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+		I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+		I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
 	}
 
-	/* Display Port state */
-	if (SUPPORTS_INTEGRATED_DP(dev)) {
-		I915_WRITE(DP_B, dev_priv->saveDP_B);
-		I915_WRITE(DP_C, dev_priv->saveDP_C);
-		I915_WRITE(DP_D, dev_priv->saveDP_D);
+	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+		/* Display Port state */
+		if (SUPPORTS_INTEGRATED_DP(dev)) {
+			I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
+			I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
+			I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
+		}
+		/* FIXME: restore TV & SDVO state */
 	}
-	/* FIXME: restore TV & SDVO state */
 
 	/* Only restore FBC info on the platform that supports FBC */
 	intel_disable_fbc(dev);
 	if (I915_HAS_FBC(dev)) {
 		if (HAS_PCH_SPLIT(dev)) {
-			I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+			I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
 		} else if (IS_GM45(dev)) {
-			I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+			I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
 		} else {
-			I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
-			I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
-			I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
-			I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+			I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE);
+			I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE);
+			I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2);
+			I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
 		}
 	}
 	/* VGA state */
 	if (HAS_PCH_SPLIT(dev))
-		I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
+		I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL);
 	else
-		I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+		I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL);
 
-	I915_WRITE(VGA0, dev_priv->saveVGA0);
-	I915_WRITE(VGA1, dev_priv->saveVGA1);
-	I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
+	I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
+	I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
+	I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
 	POSTING_READ(VGA_PD);
 	udelay(150);
 
@@ -802,46 +807,45 @@ int i915_save_state(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+	pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB);
 
 	mutex_lock(&dev->struct_mutex);
 
-	/* Hardware status page */
-	dev_priv->saveHWS = I915_READ(HWS_PGA);
-
 	i915_save_display(dev);
 
-	/* Interrupt state */
-	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->saveDEIER = I915_READ(DEIER);
-		dev_priv->saveDEIMR = I915_READ(DEIMR);
-		dev_priv->saveGTIER = I915_READ(GTIER);
-		dev_priv->saveGTIMR = I915_READ(GTIMR);
-		dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
-		dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
-		dev_priv->saveMCHBAR_RENDER_STANDBY =
-			I915_READ(RSTDBYCTL);
-		dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
-	} else {
-		dev_priv->saveIER = I915_READ(IER);
-		dev_priv->saveIMR = I915_READ(IMR);
+	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+		/* Interrupt state */
+		if (HAS_PCH_SPLIT(dev)) {
+			dev_priv->regfile.saveDEIER = I915_READ(DEIER);
+			dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
+			dev_priv->regfile.saveGTIER = I915_READ(GTIER);
+			dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
+			dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
+			dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
+			dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
+				I915_READ(RSTDBYCTL);
+			dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
+		} else {
+			dev_priv->regfile.saveIER = I915_READ(IER);
+			dev_priv->regfile.saveIMR = I915_READ(IMR);
+		}
 	}
 
 	intel_disable_gt_powersave(dev);
 
 	/* Cache mode state */
-	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+	dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
 
 	/* Memory Arbitration state */
-	dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+	dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
 
 	/* Scratch space */
 	for (i = 0; i < 16; i++) {
-		dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
-		dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+		dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2));
+		dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2));
 	}
 	for (i = 0; i < 3; i++)
-		dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+		dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2));
 
 	mutex_unlock(&dev->struct_mutex);
 
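
The SWF scratch registers sit at consecutive 4-byte offsets, which is all the (i << 2) in the loops above encodes:

    /* Sketch: address of 32-bit register i in an array based at base. */
    static inline u32 swf_reg(u32 base, int i)
    {
            return base + (i << 2); /* i * sizeof(u32) */
    }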
@@ -853,41 +857,40 @@ int i915_restore_state(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+	pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB);
 
 	mutex_lock(&dev->struct_mutex);
 
-	/* Hardware status page */
-	I915_WRITE(HWS_PGA, dev_priv->saveHWS);
-
 	i915_restore_display(dev);
 
-	/* Interrupt state */
-	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(DEIER, dev_priv->saveDEIER);
-		I915_WRITE(DEIMR, dev_priv->saveDEIMR);
-		I915_WRITE(GTIER, dev_priv->saveGTIER);
-		I915_WRITE(GTIMR, dev_priv->saveGTIMR);
-		I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
-		I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
-		I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
-	} else {
-		I915_WRITE(IER, dev_priv->saveIER);
-		I915_WRITE(IMR, dev_priv->saveIMR);
+	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+		/* Interrupt state */
+		if (HAS_PCH_SPLIT(dev)) {
+			I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
+			I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
+			I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
+			I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
+			I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
+			I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
+			I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
+		} else {
+			I915_WRITE(IER, dev_priv->regfile.saveIER);
+			I915_WRITE(IMR, dev_priv->regfile.saveIMR);
+		}
 	}
 
 	/* Cache mode state */
-	I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+	I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
 
 	/* Memory arbitration state */
-	I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
+	I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
 
 	for (i = 0; i < 16; i++) {
-		I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
-		I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
+		I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]);
+		I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]);
 	}
 	for (i = 0; i < 3; i++)
-		I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+		I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]);
 
 	mutex_unlock(&dev->struct_mutex);
 
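
CACHE_MODE_0 and MI_ARB_STATE are restored with the top 16 bits forced on. On these registers the upper half acts as a per-bit write-enable mask for the lower half (the usual i915 "masked register" layout), so OR-ing in 0xffff0000 makes every restored value bit take effect in a single write:

    /* Sketch of the masked-register idiom, assuming the high bits are
     * write enables for the corresponding low bits. */
    static u32 masked_restore(u16 value_bits)
    {
            return 0xffff0000 | value_bits;
    }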
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 903eebd2117a..9462081b1e60 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -97,7 +97,7 @@ static struct attribute_group rc6_attr_group = {
 
 static int l3_access_valid(struct drm_device *dev, loff_t offset)
 {
-	if (!IS_IVYBRIDGE(dev))
+	if (!HAS_L3_GPU_CACHE(dev))
 		return -EPERM;
 
 	if (offset % 4 != 0)
@@ -162,7 +162,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
 	if (ret)
 		return ret;
 
-	if (!dev_priv->mm.l3_remap_info) {
+	if (!dev_priv->l3_parity.remap_info) {
 		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
 		if (!temp) {
 			mutex_unlock(&drm_dev->struct_mutex);
@@ -182,9 +182,9 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
 	 * at this point it is left as a TODO.
 	*/
 	if (temp)
-		dev_priv->mm.l3_remap_info = temp;
+		dev_priv->l3_parity.remap_info = temp;
 
-	memcpy(dev_priv->mm.l3_remap_info + (offset/4),
+	memcpy(dev_priv->l3_parity.remap_info + (offset/4),
 	       buf + (offset/4),
 	       count);
 
@@ -211,12 +211,9 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
-
+	mutex_lock(&dev_priv->rps.hw_lock);
 	ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return snprintf(buf, PAGE_SIZE, "%d", ret);
 }
@@ -228,12 +225,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
-
+	mutex_lock(&dev_priv->rps.hw_lock);
 	ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return snprintf(buf, PAGE_SIZE, "%d", ret);
 }
@@ -254,16 +248,14 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
 	val /= GT_FREQUENCY_MULTIPLIER;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
+	mutex_lock(&dev_priv->rps.hw_lock);
 
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	hw_max = (rp_state_cap & 0xff);
 	hw_min = ((rp_state_cap & 0xff0000) >> 16);
 
 	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
-		mutex_unlock(&dev->struct_mutex);
+		mutex_unlock(&dev_priv->rps.hw_lock);
 		return -EINVAL;
 	}
 
@@ -272,7 +264,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
 	dev_priv->rps.max_delay = val;
 
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return count;
 }
@@ -284,12 +276,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
-
+	mutex_lock(&dev_priv->rps.hw_lock);
 	ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return snprintf(buf, PAGE_SIZE, "%d", ret);
 }
@@ -310,16 +299,14 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 
 	val /= GT_FREQUENCY_MULTIPLIER;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
+	mutex_lock(&dev_priv->rps.hw_lock);
 
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	hw_max = (rp_state_cap & 0xff);
 	hw_min = ((rp_state_cap & 0xff0000) >> 16);
 
 	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
-		mutex_unlock(&dev->struct_mutex);
+		mutex_unlock(&dev_priv->rps.hw_lock);
 		return -EINVAL;
 	}
 
@@ -328,7 +315,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 
 	dev_priv->rps.min_delay = val;
 
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return count;
 
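
The sysfs handlers above drop the interruptible, driver-wide struct_mutex in favour of a dedicated rps.hw_lock. Since plain mutex_lock() cannot fail, the read-side error path disappears; a condensed sketch of the resulting show() shape:

    /* Sketch: frequency read under the narrower lock. There is no
     * -EINTR/-ERESTARTSYS case left for the caller to handle. */
    static int cur_freq_mhz(struct drm_i915_private *dev_priv)
    {
            int val;

            mutex_lock(&dev_priv->rps.hw_lock);
            val = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
            mutex_unlock(&dev_priv->rps.hw_lock);

            return val;
    }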
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 8134421b89a6..3db4a6817713 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -229,24 +229,26 @@ TRACE_EVENT(i915_gem_evict_everything,
 );
 
 TRACE_EVENT(i915_gem_ring_dispatch,
-	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
-	    TP_ARGS(ring, seqno),
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
+	    TP_ARGS(ring, seqno, flags),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
 			     __field(u32, ring)
 			     __field(u32, seqno)
+			     __field(u32, flags)
 			     ),
 
 	    TP_fast_assign(
 			   __entry->dev = ring->dev->primary->index;
 			   __entry->ring = ring->id;
 			   __entry->seqno = seqno;
+			   __entry->flags = flags;
 			   i915_trace_irq_get(ring, seqno);
 			   ),
 
-	    TP_printk("dev=%u, ring=%u, seqno=%u",
-		      __entry->dev, __entry->ring, __entry->seqno)
+	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
+		      __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
 );
 
 TRACE_EVENT(i915_gem_ring_flush,
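
TRACE_EVENT(name, ...) generates a trace_name() stub with the TP_PROTO signature, so widening i915_gem_ring_dispatch implies that the dispatch path, which this excerpt does not show, now passes the flags through:

    /* Hypothetical call-site shape; the real caller sits in the
     * execbuffer/dispatch path outside this patch excerpt. */
    trace_i915_gem_ring_dispatch(ring, seqno, flags);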
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 56846ed5ee55..55ffba1f5818 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -755,7 +755,8 @@ void intel_setup_bios(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	 /* Set the Panel Power On/Off timings if uninitialized. */
-	if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
+	if (!HAS_PCH_SPLIT(dev) &&
+	    I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
 		/* Set T2 to 40ms and T5 to 200ms */
 		I915_WRITE(PP_ON_DELAYS, 0x019007d0);
 
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 6345878ae1e7..9293878ec7eb 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -198,6 +198,11 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
 	if (mode->clock > max_clock)
 		return MODE_CLOCK_HIGH;
 
+	/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
+	if (HAS_PCH_LPT(dev) &&
+	    (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+		return MODE_CLOCK_HIGH;
+
 	return MODE_OK;
 }
 
@@ -221,14 +226,20 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 adpa;
 
-	adpa = ADPA_HOTPLUG_BITS;
+	if (HAS_PCH_SPLIT(dev))
+		adpa = ADPA_HOTPLUG_BITS;
+	else
+		adpa = 0;
+
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 		adpa |= ADPA_HSYNC_ACTIVE_HIGH;
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
 		adpa |= ADPA_VSYNC_ACTIVE_HIGH;
 
 	/* For CPT allow 3 pipe config, for others just use A or B */
-	if (HAS_PCH_CPT(dev))
+	if (HAS_PCH_LPT(dev))
+		; /* Those bits don't exist here */
+	else if (HAS_PCH_CPT(dev))
 		adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
 	else if (intel_crtc->pipe == 0)
 		adpa |= ADPA_PIPE_A_SELECT;
@@ -401,12 +412,16 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
 				struct i2c_adapter *adapter)
 {
 	struct edid *edid;
+	int ret;
 
 	edid = intel_crt_get_edid(connector, adapter);
 	if (!edid)
 		return 0;
 
-	return intel_connector_update_modes(connector, edid);
+	ret = intel_connector_update_modes(connector, edid);
+	kfree(edid);
+
+	return ret;
 }
 
 static bool intel_crt_detect_ddc(struct drm_connector *connector)
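
The ddc_get_modes hunk above fixes a memory leak: intel_crt_get_edid() returns a heap-allocated EDID that was never freed. The ownership rule the fix enforces, with the leak annotated:

    /* Sketch: the caller owns the returned EDID; the update-modes call
     * copies what it needs, so the buffer must be kfree()d afterwards. */
    edid = intel_crt_get_edid(connector, adapter);
    if (!edid)
            return 0;

    ret = intel_connector_update_modes(connector, edid);
    kfree(edid);    /* previously leaked on this path */

    return ret;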
@@ -644,10 +659,22 @@ static int intel_crt_set_property(struct drm_connector *connector,
 static void intel_crt_reset(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crt *crt = intel_attached_crt(connector);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev)) {
+		u32 adpa;
+
+		adpa = I915_READ(PCH_ADPA);
+		adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+		adpa |= ADPA_HOTPLUG_BITS;
+		I915_WRITE(PCH_ADPA, adpa);
+		POSTING_READ(PCH_ADPA);
+
+		DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
 		crt->force_hotplug_required = 1;
+	}
 }
 
 /*
@@ -729,7 +756,7 @@ void intel_crt_init(struct drm_device *dev)
 
 	crt->base.type = INTEL_OUTPUT_ANALOG;
 	crt->base.cloneable = true;
-	if (IS_HASWELL(dev) || IS_I830(dev))
+	if (IS_I830(dev))
 		crt->base.crtc_mask = (1 << 0);
 	else
 		crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
@@ -749,7 +776,10 @@ void intel_crt_init(struct drm_device *dev)
 
 	crt->base.disable = intel_disable_crt;
 	crt->base.enable = intel_enable_crt;
-	crt->base.get_hw_state = intel_crt_get_hw_state;
+	if (IS_HASWELL(dev))
+		crt->base.get_hw_state = intel_ddi_get_hw_state;
+	else
+		crt->base.get_hw_state = intel_crt_get_hw_state;
 	intel_connector->get_hw_state = intel_connector_get_hw_state;
 
 	drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
@@ -766,18 +796,14 @@ void intel_crt_init(struct drm_device *dev)
 	 * Configure the automatic hotplug detection stuff
 	 */
 	crt->force_hotplug_required = 0;
-	if (HAS_PCH_SPLIT(dev)) {
-		u32 adpa;
-
-		adpa = I915_READ(PCH_ADPA);
-		adpa &= ~ADPA_CRT_HOTPLUG_MASK;
-		adpa |= ADPA_HOTPLUG_BITS;
-		I915_WRITE(PCH_ADPA, adpa);
-		POSTING_READ(PCH_ADPA);
-
-		DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
-		crt->force_hotplug_required = 1;
-	}
 
 	dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
+
+	/*
+	 * TODO: find a proper way to discover whether we need to set the
+	 * polarity reversal bit or not, instead of relying on the BIOS.
+	 */
+	if (HAS_PCH_LPT(dev))
+		dev_priv->fdi_rx_polarity_reversed =
+		     !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
 }
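
The new mode_valid check rejects modes needing more than the two FDI lanes LPT exposes. ironlake_get_lanes_required() is defined outside this excerpt; to first order it is the usual FDI bandwidth budget, pixel bandwidth divided by per-lane link bandwidth (the real helper may also reserve margin for spread spectrum):

    /* Rough sketch, not the driver's helper: lanes needed to carry
     * clock (kHz) * bpp bits over lanes of link_clock (kHz) * 8 bits
     * each. */
    static int fdi_lanes_required(int clock, int link_clock, int bpp)
    {
            return DIV_ROUND_UP(clock * bpp, link_clock * 8);
    }

For example, a 300000 kHz mode at 24 bpp over 270000 kHz FDI needs 7200000 / 2160000, rounded up to 4 lanes, so it is rejected on LPT.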
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index bfe375466a0e..4bad0f724019 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -58,6 +58,26 @@ static const u32 hsw_ddi_translations_fdi[] = {
 	0x00FFFFFF, 0x00040006		/* HDMI parameters */
 };
 
+static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	int type = intel_encoder->type;
+
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
+	    type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
+		struct intel_digital_port *intel_dig_port =
+			enc_to_dig_port(encoder);
+		return intel_dig_port->port;
+
+	} else if (type == INTEL_OUTPUT_ANALOG) {
+		return PORT_E;
+
+	} else {
+		DRM_ERROR("Invalid DDI encoder type %d\n", type);
+		BUG();
+	}
+}
+
 /* On Haswell, DDI port buffers must be programmed with correct values
  * in advance. The buffer values are different for FDI and DP modes,
  * but the HDMI/DVI fields are shared among those. So we program the DDI
@@ -118,6 +138,19 @@ static const long hsw_ddi_buf_ctl_values[] = {
 	DDI_BUF_EMP_800MV_3_5DB_HSW
 };
 
+static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
+				    enum port port)
+{
+	uint32_t reg = DDI_BUF_CTL(port);
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		udelay(1);
+		if (I915_READ(reg) & DDI_BUF_IS_IDLE)
+			return;
+	}
+	DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
+}
 
 /* Starting with Haswell, different DDI ports can work in FDI mode for
  * connection to the PCH-located connectors. For this, it is necessary to train
@@ -133,25 +166,36 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
-	u32 reg, temp, i;
+	u32 temp, i, rx_ctl_val;
 
-	/* Configure CPU PLL, wait for warmup */
-	I915_WRITE(SPLL_CTL,
-			SPLL_PLL_ENABLE |
-			SPLL_PLL_FREQ_1350MHz |
-			SPLL_PLL_SCC);
+	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed in the
+	 * mode set "sequence for CRT port" document:
+	 * - TP1 to TP2 time with the default value
+	 * - FDI delay to 90h
+	 */
+	I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
+				  FDI_RX_PWRDN_LANE0_VAL(2) |
+				  FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
 
-	/* Use SPLL to drive the output when in FDI mode */
-	I915_WRITE(PORT_CLK_SEL(PORT_E),
-			PORT_CLK_SEL_SPLL);
-	I915_WRITE(PIPE_CLK_SEL(pipe),
-			PIPE_CLK_SEL_PORT(PORT_E));
+	/* Enable the PCH Receiver FDI PLL */
+	rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE |
+		     ((intel_crtc->fdi_lanes - 1) << 19);
+	if (dev_priv->fdi_rx_polarity_reversed)
+		rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT;
+	I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+	POSTING_READ(_FDI_RXA_CTL);
+	udelay(220);
 
-	udelay(20);
+	/* Switch from Rawclk to PCDclk */
+	rx_ctl_val |= FDI_PCDCLK;
+	I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
 
-	/* Start the training iterating through available voltages and emphasis */
-	for (i=0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) {
+	/* Configure Port Clock Select */
+	I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
+
+	/* Start the training, iterating through the available voltage and
+	 * emphasis values and testing each one twice. */
+	for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) {
 		/* Configure DP_TP_CTL with auto-training */
 		I915_WRITE(DP_TP_CTL(PORT_E),
 					DP_TP_CTL_FDI_AUTOTRAIN |
@@ -160,103 +204,75 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
 					DP_TP_CTL_ENABLE);
 
 		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
-		temp = I915_READ(DDI_BUF_CTL(PORT_E));
-		temp = (temp & ~DDI_BUF_EMP_MASK);
 		I915_WRITE(DDI_BUF_CTL(PORT_E),
-				temp |
-				DDI_BUF_CTL_ENABLE |
-				DDI_PORT_WIDTH_X2 |
-				hsw_ddi_buf_ctl_values[i]);
+			   DDI_BUF_CTL_ENABLE |
+			   ((intel_crtc->fdi_lanes - 1) << 1) |
+			   hsw_ddi_buf_ctl_values[i / 2]);
+		POSTING_READ(DDI_BUF_CTL(PORT_E));
 
 		udelay(600);
 
-		/* We need to program FDI_RX_MISC with the default TP1 to TP2
-		 * values before enabling the receiver, and configure the delay
-		 * for the FDI timing generator to 90h. Luckily, all the other
-		 * bits are supposed to be zeroed, so we can write those values
-		 * directly.
-		 */
-		I915_WRITE(FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 |
-				FDI_RX_FDI_DELAY_90);
+		/* Program PCH FDI Receiver TU */
+		I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64));
 
-		/* Enable CPU FDI Receiver with auto-training */
-		reg = FDI_RX_CTL(pipe);
-		I915_WRITE(reg,
-				I915_READ(reg) |
-					FDI_LINK_TRAIN_AUTO |
-					FDI_RX_ENABLE |
-					FDI_LINK_TRAIN_PATTERN_1_CPT |
-					FDI_RX_ENHANCE_FRAME_ENABLE |
-					FDI_PORT_WIDTH_2X_LPT |
-					FDI_RX_PLL_ENABLE);
-		POSTING_READ(reg);
-		udelay(100);
+		/* Enable PCH FDI Receiver with auto-training */
+		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
+		I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+		POSTING_READ(_FDI_RXA_CTL);
+
+		/* Wait for FDI receiver lane calibration */
+		udelay(30);
+
+		/* Unset FDI_RX_MISC pwrdn lanes */
+		temp = I915_READ(_FDI_RXA_MISC);
+		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+		I915_WRITE(_FDI_RXA_MISC, temp);
+		POSTING_READ(_FDI_RXA_MISC);
+
+		/* Wait for FDI auto training time */
+		udelay(5);
 
 		temp = I915_READ(DP_TP_STATUS(PORT_E));
 		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
-			DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i);
+			DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
 
 			/* Enable normal pixel sending for FDI */
 			I915_WRITE(DP_TP_CTL(PORT_E),
-						DP_TP_CTL_FDI_AUTOTRAIN |
-						DP_TP_CTL_LINK_TRAIN_NORMAL |
-						DP_TP_CTL_ENHANCED_FRAME_ENABLE |
-						DP_TP_CTL_ENABLE);
+				   DP_TP_CTL_FDI_AUTOTRAIN |
+				   DP_TP_CTL_LINK_TRAIN_NORMAL |
+				   DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+				   DP_TP_CTL_ENABLE);
 
-			/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */
-			temp = I915_READ(DDI_FUNC_CTL(pipe));
-			temp &= ~PIPE_DDI_PORT_MASK;
-			temp |= PIPE_DDI_SELECT_PORT(PORT_E) |
-					PIPE_DDI_MODE_SELECT_FDI |
-					PIPE_DDI_FUNC_ENABLE |
-					PIPE_DDI_PORT_WIDTH_X2;
-			I915_WRITE(DDI_FUNC_CTL(pipe),
-					temp);
-			break;
-		} else {
-			DRM_ERROR("Error training BUF_CTL %d\n", i);
-
-			/* Disable DP_TP_CTL and FDI_RX_CTL) and retry */
-			I915_WRITE(DP_TP_CTL(PORT_E),
-					I915_READ(DP_TP_CTL(PORT_E)) &
-						~DP_TP_CTL_ENABLE);
-			I915_WRITE(FDI_RX_CTL(pipe),
-					I915_READ(FDI_RX_CTL(pipe)) &
-						~FDI_RX_PLL_ENABLE);
-			continue;
+			return;
 		}
+
+		temp = I915_READ(DDI_BUF_CTL(PORT_E));
+		temp &= ~DDI_BUF_CTL_ENABLE;
+		I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
+		POSTING_READ(DDI_BUF_CTL(PORT_E));
+
+		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
+		temp = I915_READ(DP_TP_CTL(PORT_E));
+		temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+		temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+		I915_WRITE(DP_TP_CTL(PORT_E), temp);
+		POSTING_READ(DP_TP_CTL(PORT_E));
+
+		intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+
+		rx_ctl_val &= ~FDI_RX_ENABLE;
+		I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+		POSTING_READ(_FDI_RXA_CTL);
+
+		/* Reset FDI_RX_MISC pwrdn lanes */
+		temp = I915_READ(_FDI_RXA_MISC);
+		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+		temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+		I915_WRITE(_FDI_RXA_MISC, temp);
+		POSTING_READ(_FDI_RXA_MISC);
 	}
 
-	DRM_DEBUG_KMS("FDI train done.\n");
-}
-
-/* For DDI connections, it is possible to support different outputs over the
- * same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by
- * the time the output is detected what exactly is on the other end of it. This
- * function aims at providing support for this detection and proper output
- * configuration.
- */
-void intel_ddi_init(struct drm_device *dev, enum port port)
-{
-	/* For now, we don't do any proper output detection and assume that we
-	 * handle HDMI only */
-
-	switch(port){
-	case PORT_A:
-		/* We don't handle eDP and DP yet */
-		DRM_DEBUG_DRIVER("Found digital output on DDI port A\n");
-		break;
-	/* Assume that the  ports B, C and D are working in HDMI mode for now */
-	case PORT_B:
-	case PORT_C:
-	case PORT_D:
-		intel_hdmi_init(dev, DDI_BUF_CTL(port), port);
-		break;
-	default:
-		DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
-				port);
-		break;
-	}
+	DRM_ERROR("FDI link training failed!\n");
 }
 
 /* WRPLL clock dividers */
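
The rewritten training loop above runs for twice the table size and indexes hsw_ddi_buf_ctl_values[i / 2], giving every voltage/emphasis entry two attempts before declaring failure. The iteration pattern in isolation, with program_and_train(), autotrain_done() and tear_down_for_retry() standing in for the register sequences above:

    /* Sketch: try each table entry twice; on failure, tear everything
     * down (disable buffer, reset to training pattern 1, wait for
     * idle) before retrying or advancing. */
    for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) {
            program_and_train(hsw_ddi_buf_ctl_values[i / 2]);
            if (autotrain_done())
                    return;
            tear_down_for_retry();
    }
    DRM_ERROR("FDI link training failed!\n");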
@@ -645,116 +661,435 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
 	{298000,	2,	21,	19},
 };
 
-void intel_ddi_mode_set(struct drm_encoder *encoder,
-				struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
+static void intel_ddi_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
 {
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = encoder->crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-	int port = intel_hdmi->ddi_port;
+	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+	int port = intel_ddi_get_encoder_port(intel_encoder);
 	int pipe = intel_crtc->pipe;
-	int p, n2, r2;
-	u32 temp, i;
+	int type = intel_encoder->type;
 
-	/* On Haswell, we need to enable the clocks and prepare DDI function to
-	 * work in HDMI mode for this pipe.
-	 */
-	DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
+	DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
+		      port_name(port), pipe_name(pipe));
+
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+		switch (intel_dp->lane_count) {
+		case 1:
+			intel_dp->DP |= DDI_PORT_WIDTH_X1;
+			break;
+		case 2:
+			intel_dp->DP |= DDI_PORT_WIDTH_X2;
+			break;
+		case 4:
+			intel_dp->DP |= DDI_PORT_WIDTH_X4;
+			break;
+		default:
+			intel_dp->DP |= DDI_PORT_WIDTH_X4;
+			WARN(1, "Unexpected DP lane count %d\n",
+			     intel_dp->lane_count);
+			break;
+		}
+
+		if (intel_dp->has_audio) {
+			DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
+					 pipe_name(intel_crtc->pipe));
+
+			/* write eld */
+			DRM_DEBUG_DRIVER("DP audio: write eld information\n");
+			intel_write_eld(encoder, adjusted_mode);
+		}
+
+		intel_dp_init_link_config(intel_dp);
+
+	} else if (type == INTEL_OUTPUT_HDMI) {
+		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+		if (intel_hdmi->has_audio) {
+			/* Proper support for digital audio needs a new logic
+			 * and a new set of registers, so we leave it for future
+			 * patch bombing.
+			 */
+			DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
+					 pipe_name(intel_crtc->pipe));
+
+			/* write eld */
+			DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
+			intel_write_eld(encoder, adjusted_mode);
+		}
+
+		intel_hdmi->set_infoframes(encoder, adjusted_mode);
+	}
+}
+
+static struct intel_encoder *
+intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *intel_encoder, *ret = NULL;
+	int num_encoders = 0;
+
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+		ret = intel_encoder;
+		num_encoders++;
+	}
+
+	if (num_encoders != 1)
+		WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders,
+		     intel_crtc->pipe);
+
+	BUG_ON(ret == NULL);
+	return ret;
+}
+
+void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	uint32_t val;
+
+	switch (intel_crtc->ddi_pll_sel) {
+	case PORT_CLK_SEL_SPLL:
+		plls->spll_refcount--;
+		if (plls->spll_refcount == 0) {
+			DRM_DEBUG_KMS("Disabling SPLL\n");
+			val = I915_READ(SPLL_CTL);
+			WARN_ON(!(val & SPLL_PLL_ENABLE));
+			I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+			POSTING_READ(SPLL_CTL);
+		}
+		break;
+	case PORT_CLK_SEL_WRPLL1:
+		plls->wrpll1_refcount--;
+		if (plls->wrpll1_refcount == 0) {
+			DRM_DEBUG_KMS("Disabling WRPLL 1\n");
+			val = I915_READ(WRPLL_CTL1);
+			WARN_ON(!(val & WRPLL_PLL_ENABLE));
+			I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
+			POSTING_READ(WRPLL_CTL1);
+		}
+		break;
+	case PORT_CLK_SEL_WRPLL2:
+		plls->wrpll2_refcount--;
+		if (plls->wrpll2_refcount == 0) {
+			DRM_DEBUG_KMS("Disabling WRPLL 2\n");
+			val = I915_READ(WRPLL_CTL2);
+			WARN_ON(!(val & WRPLL_PLL_ENABLE));
+			I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
+			POSTING_READ(WRPLL_CTL2);
+		}
+		break;
+	}
+
+	WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
+	WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
+	WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
+
+	intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
+}
+
+static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2)
+{
+	u32 i;
 
 	for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
-		if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
+		if (clock <= wrpll_tmds_clock_table[i].clock)
 			break;
 
 	if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
 		i--;
 
-	p = wrpll_tmds_clock_table[i].p;
-	n2 = wrpll_tmds_clock_table[i].n2;
-	r2 = wrpll_tmds_clock_table[i].r2;
+	*p = wrpll_tmds_clock_table[i].p;
+	*n2 = wrpll_tmds_clock_table[i].n2;
+	*r2 = wrpll_tmds_clock_table[i].r2;
 
-	if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
-		DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
-			 wrpll_tmds_clock_table[i].clock, crtc->mode.clock);
+	if (wrpll_tmds_clock_table[i].clock != clock)
+		DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n",
+			 wrpll_tmds_clock_table[i].clock, clock);
 
-	DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
-		      crtc->mode.clock, p, n2, r2);
+	DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
+		      clock, *p, *n2, *r2);
+}
 
-	/* Enable LCPLL if disabled */
-	temp = I915_READ(LCPLL_CTL);
-	if (temp & LCPLL_PLL_DISABLE)
-		I915_WRITE(LCPLL_CTL,
-				temp & ~LCPLL_PLL_DISABLE);
+bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+	int type = intel_encoder->type;
+	enum pipe pipe = intel_crtc->pipe;
+	uint32_t reg, val;
 
-	/* Configure WR PLL 1, program the correct divider values for
-	 * the desired frequency and wait for warmup */
-	I915_WRITE(WRPLL_CTL1,
-			WRPLL_PLL_ENABLE |
-			WRPLL_PLL_SELECT_LCPLL_2700 |
-			WRPLL_DIVIDER_REFERENCE(r2) |
-			WRPLL_DIVIDER_FEEDBACK(n2) |
-			WRPLL_DIVIDER_POST(p));
+	/* TODO: reuse PLLs when possible (compare values) */
 
-	udelay(20);
+	intel_ddi_put_crtc_pll(crtc);
 
-	/* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use
-	 * this port for connection.
-	 */
-	I915_WRITE(PORT_CLK_SEL(port),
-			PORT_CLK_SEL_WRPLL1);
-	I915_WRITE(PIPE_CLK_SEL(pipe),
-			PIPE_CLK_SEL_PORT(port));
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-	udelay(20);
+		switch (intel_dp->link_bw) {
+		case DP_LINK_BW_1_62:
+			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
+			break;
+		case DP_LINK_BW_2_7:
+			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
+			break;
+		case DP_LINK_BW_5_4:
+			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
+			break;
+		default:
+			DRM_ERROR("Link bandwidth %d unsupported\n",
+				  intel_dp->link_bw);
+			return false;
+		}
 
-	if (intel_hdmi->has_audio) {
-		/* Proper support for digital audio needs a new logic and a new set
-		 * of registers, so we leave it for future patch bombing.
-		 */
-		DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
-				 pipe_name(intel_crtc->pipe));
+		/* We don't need to turn any PLL on because we'll use LCPLL. */
+		return true;
 
-		/* write eld */
-		DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
-		intel_write_eld(encoder, adjusted_mode);
+	} else if (type == INTEL_OUTPUT_HDMI) {
+		int p, n2, r2;
+
+		if (plls->wrpll1_refcount == 0) {
+			DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
+				      pipe_name(pipe));
+			plls->wrpll1_refcount++;
+			reg = WRPLL_CTL1;
+			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
+		} else if (plls->wrpll2_refcount == 0) {
+			DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
+				      pipe_name(pipe));
+			plls->wrpll2_refcount++;
+			reg = WRPLL_CTL2;
+			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
+		} else {
+			DRM_ERROR("No WRPLLs available!\n");
+			return false;
+		}
+
+		WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
+		     "WRPLL already enabled\n");
+
+		intel_ddi_calculate_wrpll(clock, &p, &n2, &r2);
+
+		val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+		      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+		      WRPLL_DIVIDER_POST(p);
+
+	} else if (type == INTEL_OUTPUT_ANALOG) {
+		if (plls->spll_refcount == 0) {
+			DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
+				      pipe_name(pipe));
+			plls->spll_refcount++;
+			reg = SPLL_CTL;
+			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
+		} else {
+			DRM_ERROR("SPLL already in use\n");
+			return false;
+		}
+
+		WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
+		     "SPLL already enabled\n");
+
+		val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
+
+	} else {
+		WARN(1, "Invalid DDI encoder type %d\n", type);
+		return false;
 	}
 
-	/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
-	temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);
+	I915_WRITE(reg, val);
+	udelay(20);
+
+	return true;
+}
+
+void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+	int type = intel_encoder->type;
+	uint32_t temp;
+
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+
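+		/* The Main Stream Attribute (MSA) data tells the DP sink the
+		 * colour depth of the stream. */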
+		temp = TRANS_MSA_SYNC_CLK;
+		switch (intel_crtc->bpp) {
+		case 18:
+			temp |= TRANS_MSA_6_BPC;
+			break;
+		case 24:
+			temp |= TRANS_MSA_8_BPC;
+			break;
+		case 30:
+			temp |= TRANS_MSA_10_BPC;
+			break;
+		case 36:
+			temp |= TRANS_MSA_12_BPC;
+			break;
+		default:
+			temp |= TRANS_MSA_8_BPC;
+			WARN(1, "%d bpp unsupported by DDI function\n",
+			     intel_crtc->bpp);
+		}
+		I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
+	}
+}
+
+void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	enum pipe pipe = intel_crtc->pipe;
+	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	int type = intel_encoder->type;
+	uint32_t temp;
+
+	/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
+	temp = TRANS_DDI_FUNC_ENABLE;
+	temp |= TRANS_DDI_SELECT_PORT(port);
 
 	switch (intel_crtc->bpp) {
 	case 18:
-		temp |= PIPE_DDI_BPC_6;
+		temp |= TRANS_DDI_BPC_6;
 		break;
 	case 24:
-		temp |= PIPE_DDI_BPC_8;
+		temp |= TRANS_DDI_BPC_8;
 		break;
 	case 30:
-		temp |= PIPE_DDI_BPC_10;
+		temp |= TRANS_DDI_BPC_10;
 		break;
 	case 36:
-		temp |= PIPE_DDI_BPC_12;
+		temp |= TRANS_DDI_BPC_12;
 		break;
 	default:
-		WARN(1, "%d bpp unsupported by pipe DDI function\n",
+		WARN(1, "%d bpp unsupported by transcoder DDI function\n",
 		     intel_crtc->bpp);
 	}
 
-	if (intel_hdmi->has_hdmi_sink)
-		temp |= PIPE_DDI_MODE_SELECT_HDMI;
+	if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+		temp |= TRANS_DDI_PVSYNC;
+	if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+		temp |= TRANS_DDI_PHSYNC;
+
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		switch (pipe) {
+		case PIPE_A:
+			temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+			break;
+		case PIPE_B:
+			temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
+			break;
+		case PIPE_C:
+			temp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
+			break;
+		default:
+			BUG();
+			break;
+		}
+	}
+
+	if (type == INTEL_OUTPUT_HDMI) {
+		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+		if (intel_hdmi->has_hdmi_sink)
+			temp |= TRANS_DDI_MODE_SELECT_HDMI;
+		else
+			temp |= TRANS_DDI_MODE_SELECT_DVI;
+
+	} else if (type == INTEL_OUTPUT_ANALOG) {
+		temp |= TRANS_DDI_MODE_SELECT_FDI;
+		temp |= (intel_crtc->fdi_lanes - 1) << 1;
+
+	} else if (type == INTEL_OUTPUT_DISPLAYPORT ||
+		   type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+
+		switch (intel_dp->lane_count) {
+		case 1:
+			temp |= TRANS_DDI_PORT_WIDTH_X1;
+			break;
+		case 2:
+			temp |= TRANS_DDI_PORT_WIDTH_X2;
+			break;
+		case 4:
+			temp |= TRANS_DDI_PORT_WIDTH_X4;
+			break;
+		default:
+			temp |= TRANS_DDI_PORT_WIDTH_X4;
+			WARN(1, "Unsupported lane count %d\n",
+			     intel_dp->lane_count);
+		}
+
+	} else {
+		WARN(1, "Invalid encoder type %d for pipe %d\n",
+		     intel_encoder->type, pipe);
+	}
+
+	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
+
+void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+				       enum transcoder cpu_transcoder)
+{
+	uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
+	uint32_t val = I915_READ(reg);
+
+	val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
+	val |= TRANS_DDI_PORT_NONE;
+	I915_WRITE(reg, val);
+}
+
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
+{
+	struct drm_device *dev = intel_connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *intel_encoder = intel_connector->encoder;
+	int type = intel_connector->base.connector_type;
+	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	enum pipe pipe = 0;
+	enum transcoder cpu_transcoder;
+	uint32_t tmp;
+
+	if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
+		return false;
+
+	if (port == PORT_A)
+		cpu_transcoder = TRANSCODER_EDP;
 	else
-		temp |= PIPE_DDI_MODE_SELECT_DVI;
+		cpu_transcoder = pipe;
 
-	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
-		temp |= PIPE_DDI_PVSYNC;
-	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
-		temp |= PIPE_DDI_PHSYNC;
+	tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
 
-	I915_WRITE(DDI_FUNC_CTL(pipe), temp);
+	switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
+	case TRANS_DDI_MODE_SELECT_HDMI:
+	case TRANS_DDI_MODE_SELECT_DVI:
+		return (type == DRM_MODE_CONNECTOR_HDMIA);
 
-	intel_hdmi->set_infoframes(encoder, adjusted_mode);
+	case TRANS_DDI_MODE_SELECT_DP_SST:
+		if (type == DRM_MODE_CONNECTOR_eDP)
+			return true;
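+		/* fall through: an SST-configured transcoder can also serve an
+		 * external DP connector */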
+	case TRANS_DDI_MODE_SELECT_DP_MST:
+		return (type == DRM_MODE_CONNECTOR_DisplayPort);
+
+	case TRANS_DDI_MODE_SELECT_FDI:
+		return (type == DRM_MODE_CONNECTOR_VGA);
+
+	default:
+		return false;
+	}
 }
 
 bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -762,58 +1097,418 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	enum port port = intel_ddi_get_encoder_port(encoder);
 	u32 tmp;
 	int i;
 
-	tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port));
+	tmp = I915_READ(DDI_BUF_CTL(port));
 
 	if (!(tmp & DDI_BUF_CTL_ENABLE))
 		return false;
 
-	for_each_pipe(i) {
-		tmp = I915_READ(DDI_FUNC_CTL(i));
+	if (port == PORT_A) {
+		tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
 
-		if ((tmp & PIPE_DDI_PORT_MASK)
-		    == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) {
-			*pipe = i;
-			return true;
+		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+		case TRANS_DDI_EDP_INPUT_A_ON:
+		case TRANS_DDI_EDP_INPUT_A_ONOFF:
+			*pipe = PIPE_A;
+			break;
+		case TRANS_DDI_EDP_INPUT_B_ONOFF:
+			*pipe = PIPE_B;
+			break;
+		case TRANS_DDI_EDP_INPUT_C_ONOFF:
+			*pipe = PIPE_C;
+			break;
+		}
+
+		return true;
+	} else {
+		for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
+			tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+
+			if ((tmp & TRANS_DDI_PORT_MASK)
+			    == TRANS_DDI_SELECT_PORT(port)) {
+				*pipe = i;
+				return true;
+			}
 		}
 	}
 
-	DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port);
+	DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port);
 
 	return true;
 }
 
-void intel_enable_ddi(struct intel_encoder *encoder)
+static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
+				       enum pipe pipe)
+{
+	uint32_t temp, ret;
+	enum port port;
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
+	int i;
+
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		port = PORT_A;
+	} else {
+		temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+		temp &= TRANS_DDI_PORT_MASK;
+
+		for (i = PORT_B; i <= PORT_E; i++)
+			if (temp == TRANS_DDI_SELECT_PORT(i))
+				port = i;
+	}
+
+	ret = I915_READ(PORT_CLK_SEL(port));
+
+	DRM_DEBUG_KMS("Pipe %c connected to port %c using clock 0x%08x\n",
+		      pipe_name(pipe), port_name(port), ret);
+
+	return ret;
+}
+
+void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
 {
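+	/* Reconstruct ddi_pll_sel and the PLL refcounts from the current
+	 * hardware state, e.g. as left behind by the BIOS. */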
-	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-	int port = intel_hdmi->ddi_port;
-	u32 temp;
+	enum pipe pipe;
+	struct intel_crtc *intel_crtc;
 
-	temp = I915_READ(DDI_BUF_CTL(port));
-	temp |= DDI_BUF_CTL_ENABLE;
+	for_each_pipe(pipe) {
+		intel_crtc =
+			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
-	/* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
-	 * and swing/emphasis values are ignored so nothing special needs
-	 * to be done besides enabling the port.
+		if (!intel_crtc->active)
+			continue;
+
+		intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
+								 pipe);
+
+		switch (intel_crtc->ddi_pll_sel) {
+		case PORT_CLK_SEL_SPLL:
+			dev_priv->ddi_plls.spll_refcount++;
+			break;
+		case PORT_CLK_SEL_WRPLL1:
+			dev_priv->ddi_plls.wrpll1_refcount++;
+			break;
+		case PORT_CLK_SEL_WRPLL2:
+			dev_priv->ddi_plls.wrpll2_refcount++;
+			break;
+		}
+	}
+}
+
+void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
+{
+	struct drm_crtc *crtc = &intel_crtc->base;
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
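+	/* The EDP transcoder has no TRANS_CLK_SEL register: it is always
+	 * driven by the DDI A clock. */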
+	if (cpu_transcoder != TRANSCODER_EDP)
+		I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+			   TRANS_CLK_SEL_PORT(port));
+}
+
+void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
+{
+	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+	if (cpu_transcoder != TRANSCODER_EDP)
+		I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+			   TRANS_CLK_SEL_DISABLED);
+}
+
+static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	int type = intel_encoder->type;
+
+	if (type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+		ironlake_edp_panel_vdd_on(intel_dp);
+		ironlake_edp_panel_on(intel_dp);
+		ironlake_edp_panel_vdd_off(intel_dp, true);
+	}
+
+	WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
+	I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel);
+
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+		intel_dp_start_link_train(intel_dp);
+		intel_dp_complete_link_train(intel_dp);
+	}
+}
+
+static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	int type = intel_encoder->type;
+	uint32_t val;
+	bool wait = false;
+
+	val = I915_READ(DDI_BUF_CTL(port));
+	if (val & DDI_BUF_CTL_ENABLE) {
+		val &= ~DDI_BUF_CTL_ENABLE;
+		I915_WRITE(DDI_BUF_CTL(port), val);
+		wait = true;
+	}
+
+	val = I915_READ(DP_TP_CTL(port));
+	val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+	val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+	I915_WRITE(DP_TP_CTL(port), val);
+
+	if (wait)
+		intel_wait_ddi_buf_idle(dev_priv, port);
+
+	if (type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+		ironlake_edp_panel_vdd_on(intel_dp);
+		ironlake_edp_panel_off(intel_dp);
+	}
+
+	I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+}
+
+static void intel_enable_ddi(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	int type = intel_encoder->type;
+
+	if (type == INTEL_OUTPUT_HDMI) {
+		/* In HDMI/DVI mode, the port width, and swing/emphasis values
+		 * are ignored so nothing special needs to be done besides
+		 * enabling the port.
+		 */
+		I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE);
+	} else if (type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		ironlake_edp_backlight_on(intel_dp);
+	}
+}
+
+static void intel_disable_ddi(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	int type = intel_encoder->type;
+
+	if (type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		ironlake_edp_backlight_off(intel_dp);
+	}
+}
+
+int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
+{
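+	/* A fuse strap can cap CDCLK at 450MHz regardless of how LCPLL is
+	 * configured. */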
+	if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
+		return 450;
+	else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
+		 LCPLL_CLK_FREQ_450)
+		return 450;
+	else if (IS_ULT(dev_priv->dev))
+		return 338;
+	else
+		return 540;
+}
+
+void intel_ddi_pll_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t val = I915_READ(LCPLL_CTL);
+
+	/* The LCPLL register should be turned on by the BIOS. For now let's
+	 * just check its state and print errors in case something is wrong.
+	 * Don't even try to turn it on.
 	 */
-	I915_WRITE(DDI_BUF_CTL(port), temp);
+
+	DRM_DEBUG_KMS("CDCLK running at %dMHz\n",
+		      intel_ddi_get_cdclk_freq(dev_priv));
+
+	if (val & LCPLL_CD_SOURCE_FCLK)
+		DRM_ERROR("CDCLK source is not LCPLL\n");
+
+	if (val & LCPLL_PLL_DISABLE)
+		DRM_ERROR("LCPLL is disabled\n");
 }
 
-void intel_disable_ddi(struct intel_encoder *encoder)
+void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
 {
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-	int port = intel_hdmi->ddi_port;
-	u32 temp;
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	enum port port = intel_dig_port->port;
+	bool wait = false;
+	uint32_t val;
 
-	temp = I915_READ(DDI_BUF_CTL(port));
-	temp &= ~DDI_BUF_CTL_ENABLE;
+	if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
+		val = I915_READ(DDI_BUF_CTL(port));
+		if (val & DDI_BUF_CTL_ENABLE) {
+			val &= ~DDI_BUF_CTL_ENABLE;
+			I915_WRITE(DDI_BUF_CTL(port), val);
+			wait = true;
+		}
 
-	I915_WRITE(DDI_BUF_CTL(port), temp);
+		val = I915_READ(DP_TP_CTL(port));
+		val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+		val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+		I915_WRITE(DP_TP_CTL(port), val);
+		POSTING_READ(DP_TP_CTL(port));
+
+		if (wait)
+			intel_wait_ddi_buf_idle(dev_priv, port);
+	}
+
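+	/* Bring DP_TP back up in SST mode with training pattern 1 and
+	 * scrambling disabled before re-enabling the DDI buffer. */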
+	val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
+	      DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
+	if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+		val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+	I915_WRITE(DP_TP_CTL(port), val);
+	POSTING_READ(DP_TP_CTL(port));
+
+	intel_dp->DP |= DDI_BUF_CTL_ENABLE;
+	I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
+	POSTING_READ(DDI_BUF_CTL(port));
+
+	udelay(600);
+}
+
+void intel_ddi_fdi_disable(struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+	uint32_t val;
+
+	intel_ddi_post_disable(intel_encoder);
+
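+	/* The LPT PCH is reached through a single FDI link wired to the RX A
+	 * registers: disable the receiver, power down the lanes, then stop
+	 * PCDCLK and the RX PLL, in that order. */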
+	val = I915_READ(_FDI_RXA_CTL);
+	val &= ~FDI_RX_ENABLE;
+	I915_WRITE(_FDI_RXA_CTL, val);
+
+	val = I915_READ(_FDI_RXA_MISC);
+	val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+	val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+	I915_WRITE(_FDI_RXA_MISC, val);
+
+	val = I915_READ(_FDI_RXA_CTL);
+	val &= ~FDI_PCDCLK;
+	I915_WRITE(_FDI_RXA_CTL, val);
+
+	val = I915_READ(_FDI_RXA_CTL);
+	val &= ~FDI_RX_PLL_ENABLE;
+	I915_WRITE(_FDI_RXA_CTL, val);
+}
+
+static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+	int type = intel_encoder->type;
+
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
+		intel_dp_check_link_status(intel_dp);
+}
+
+static void intel_ddi_destroy(struct drm_encoder *encoder)
+{
+	/* HDMI has nothing special to destroy, so we can go with this. */
+	intel_dp_encoder_destroy(encoder);
+}
+
+static bool intel_ddi_mode_fixup(struct drm_encoder *encoder,
+				 const struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode)
+{
+	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+	int type = intel_encoder->type;
+
+	WARN(type == INTEL_OUTPUT_UNKNOWN, "mode_fixup() on unknown output!\n");
+
+	if (type == INTEL_OUTPUT_HDMI)
+		return intel_hdmi_mode_fixup(encoder, mode, adjusted_mode);
+	else
+		return intel_dp_mode_fixup(encoder, mode, adjusted_mode);
+}
+
+static const struct drm_encoder_funcs intel_ddi_funcs = {
+	.destroy = intel_ddi_destroy,
+};
+
+static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
+	.mode_fixup = intel_ddi_mode_fixup,
+	.mode_set = intel_ddi_mode_set,
+	.disable = intel_encoder_noop,
+};
+
+void intel_ddi_init(struct drm_device *dev, enum port port)
+{
+	struct intel_digital_port *intel_dig_port;
+	struct intel_encoder *intel_encoder;
+	struct drm_encoder *encoder;
+	struct intel_connector *hdmi_connector = NULL;
+	struct intel_connector *dp_connector = NULL;
+
+	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+	if (!intel_dig_port)
+		return;
+
+	dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	if (!dp_connector) {
+		kfree(intel_dig_port);
+		return;
+	}
+
+	if (port != PORT_A) {
+		hdmi_connector = kzalloc(sizeof(struct intel_connector),
+					 GFP_KERNEL);
+		if (!hdmi_connector) {
+			kfree(dp_connector);
+			kfree(intel_dig_port);
+			return;
+		}
+	}
+
+	intel_encoder = &intel_dig_port->base;
+	encoder = &intel_encoder->base;
+
+	drm_encoder_init(dev, encoder, &intel_ddi_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+	drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
+
+	intel_encoder->enable = intel_enable_ddi;
+	intel_encoder->pre_enable = intel_ddi_pre_enable;
+	intel_encoder->disable = intel_disable_ddi;
+	intel_encoder->post_disable = intel_ddi_post_disable;
+	intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+
+	intel_dig_port->port = port;
+	if (hdmi_connector)
+		intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
+	else
+		intel_dig_port->hdmi.sdvox_reg = 0;
+	intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
+
+	intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
+	intel_encoder->crtc_mask =  (1 << 0) | (1 << 1) | (1 << 2);
+	intel_encoder->cloneable = false;
+	intel_encoder->hot_plug = intel_ddi_hot_plug;
+
+	if (hdmi_connector)
+		intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
+	intel_dp_init_connector(intel_dig_port, dp_connector);
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b426d44a2b05..5d127e068950 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -41,8 +41,6 @@
 #include <drm/drm_crtc_helper.h>
 #include <linux/dma_remapping.h>
 
-#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
-
 bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
 static void intel_increase_pllclock(struct drm_crtc *crtc);
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
@@ -80,6 +78,16 @@ struct intel_limit {
 /* FDI */
 #define IRONLAKE_FDI_FREQ		2700000 /* in kHz for mode->clock */
 
+int
+intel_pch_rawclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(!HAS_PCH_SPLIT(dev));
+
+	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
+}
+
 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 		    int target, int refclk, intel_clock_t *match_clock,
@@ -380,7 +388,7 @@ static const intel_limit_t intel_limits_vlv_dac = {
 
 static const intel_limit_t intel_limits_vlv_hdmi = {
 	.dot = { .min = 20000, .max = 165000 },
-	.vco = { .min = 5994000, .max = 4000000 },
+	.vco = { .min = 4000000, .max = 5994000 },
 	.n = { .min = 1, .max = 7 },
 	.m = { .min = 60, .max = 300 }, /* guess */
 	.m1 = { .min = 2, .max = 3 },
@@ -393,10 +401,10 @@ static const intel_limit_t intel_limits_vlv_hdmi = {
 };
 
 static const intel_limit_t intel_limits_vlv_dp = {
-	.dot = { .min = 162000, .max = 270000 },
-	.vco = { .min = 5994000, .max = 4000000 },
+	.dot = { .min = 25000, .max = 270000 },
+	.vco = { .min = 4000000, .max = 6000000 },
 	.n = { .min = 1, .max = 7 },
-	.m = { .min = 60, .max = 300 }, /* guess */
+	.m = { .min = 22, .max = 450 },
 	.m1 = { .min = 2, .max = 3 },
 	.m2 = { .min = 11, .max = 156 },
 	.p = { .min = 10, .max = 30 },
@@ -531,7 +539,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
 				limit = &intel_limits_ironlake_single_lvds;
 		}
 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
-			HAS_eDP)
+		   intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
 		limit = &intel_limits_ironlake_display_port;
 	else
 		limit = &intel_limits_ironlake_dac;
@@ -927,6 +935,15 @@ intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
 	return true;
 }
 
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+					     enum pipe pipe)
+{
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	return intel_crtc->cpu_transcoder;
+}
+
 static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -999,9 +1016,11 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
 
 	if (INTEL_INFO(dev)->gen >= 4) {
-		int reg = PIPECONF(pipe);
+		int reg = PIPECONF(cpu_transcoder);
 
 		/* Wait for the Pipe State to go off */
 		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
@@ -1103,12 +1122,14 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
 	int reg;
 	u32 val;
 	bool cur_state;
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
 
 	if (IS_HASWELL(dev_priv->dev)) {
 		/* On Haswell, DDI is used instead of FDI_TX_CTL */
-		reg = DDI_FUNC_CTL(pipe);
+		reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
 		val = I915_READ(reg);
-		cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
+		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
 	} else {
 		reg = FDI_TX_CTL(pipe);
 		val = I915_READ(reg);
@@ -1128,14 +1149,9 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
 	u32 val;
 	bool cur_state;
 
-	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
-			DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
-			return;
-	} else {
-		reg = FDI_RX_CTL(pipe);
-		val = I915_READ(reg);
-		cur_state = !!(val & FDI_RX_ENABLE);
-	}
+	reg = FDI_RX_CTL(pipe);
+	val = I915_READ(reg);
+	cur_state = !!(val & FDI_RX_ENABLE);
 	WARN(cur_state != state,
 	     "FDI RX state assertion failure (expected %s, current %s)\n",
 	     state_string(state), state_string(cur_state));
@@ -1168,10 +1184,6 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
 	int reg;
 	u32 val;
 
-	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
-		DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
-		return;
-	}
 	reg = FDI_RX_CTL(pipe);
 	val = I915_READ(reg);
 	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
@@ -1212,12 +1224,14 @@ void assert_pipe(struct drm_i915_private *dev_priv,
 	int reg;
 	u32 val;
 	bool cur_state;
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
 
 	/* if we need the pipe A quirk it must be always on */
 	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
 		state = true;
 
-	reg = PIPECONF(pipe);
+	reg = PIPECONF(cpu_transcoder);
 	val = I915_READ(reg);
 	cur_state = !!(val & PIPECONF_ENABLE);
 	WARN(cur_state != state,
@@ -1492,24 +1506,26 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 
 /* SBI access */
 static void
-intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
+intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+		enum intel_sbi_destination destination)
 {
 	unsigned long flags;
+	u32 tmp;
 
 	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
-	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
-				100)) {
+	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
 		DRM_ERROR("timeout waiting for SBI to become ready\n");
 		goto out_unlock;
 	}
 
-	I915_WRITE(SBI_ADDR,
-			(reg << 16));
-	I915_WRITE(SBI_DATA,
-			value);
-	I915_WRITE(SBI_CTL_STAT,
-			SBI_BUSY |
-			SBI_CTL_OP_CRWR);
+	I915_WRITE(SBI_ADDR, (reg << 16));
+	I915_WRITE(SBI_DATA, value);
+
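+	/* The iCLK and mPHY units sit behind different SBI destinations and
+	 * need different opcodes. */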
+	if (destination == SBI_ICLK)
+		tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+	else
+		tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
+	I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
 
 	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
 				100)) {
@@ -1522,23 +1538,25 @@ out_unlock:
 }
 
 static u32
-intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
+intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+	       enum intel_sbi_destination destination)
 {
 	unsigned long flags;
 	u32 value = 0;
 
 	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
-	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
-				100)) {
+	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
 		DRM_ERROR("timeout waiting for SBI to become ready\n");
 		goto out_unlock;
 	}
 
-	I915_WRITE(SBI_ADDR,
-			(reg << 16));
-	I915_WRITE(SBI_CTL_STAT,
-			SBI_BUSY |
-			SBI_CTL_OP_CRRD);
+	I915_WRITE(SBI_ADDR, (reg << 16));
+
+	if (destination == SBI_ICLK)
+		value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+	else
+		value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+	I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
 
 	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
 				100)) {
@@ -1554,14 +1572,14 @@ out_unlock:
 }
 
 /**
- * intel_enable_pch_pll - enable PCH PLL
+ * ironlake_enable_pch_pll - enable PCH PLL
  * @dev_priv: i915 private structure
  * @pipe: pipe PLL to enable
  *
  * The PCH PLL needs to be enabled before the PCH transcoder, since it
  * drives the transcoder clock.
  */
-static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
+static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc)
 {
 	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
 	struct intel_pch_pll *pll;
@@ -1645,12 +1663,12 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
 	pll->on = false;
 }
 
-static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
-				    enum pipe pipe)
+static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+					   enum pipe pipe)
 {
-	int reg;
-	u32 val, pipeconf_val;
+	struct drm_device *dev = dev_priv->dev;
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	uint32_t reg, val, pipeconf_val;
 
 	/* PCH only available on ILK+ */
 	BUG_ON(dev_priv->info->gen < 5);
@@ -1664,10 +1682,15 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
 	assert_fdi_tx_enabled(dev_priv, pipe);
 	assert_fdi_rx_enabled(dev_priv, pipe);
 
-	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
-		DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
-		return;
+	if (HAS_PCH_CPT(dev)) {
+		/* Workaround: Set the timing override bit before enabling the
+		 * pch transcoder. */
+		reg = TRANS_CHICKEN2(pipe);
+		val = I915_READ(reg);
+		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+		I915_WRITE(reg, val);
 	}
+
 	reg = TRANSCONF(pipe);
 	val = I915_READ(reg);
 	pipeconf_val = I915_READ(PIPECONF(pipe));
@@ -1696,11 +1719,42 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
 		DRM_ERROR("failed to enable transcoder %d\n", pipe);
 }
 
-static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
-				     enum pipe pipe)
+static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+				      enum transcoder cpu_transcoder)
 {
-	int reg;
-	u32 val;
+	u32 val, pipeconf_val;
+
+	/* PCH only available on ILK+ */
+	BUG_ON(dev_priv->info->gen < 5);
+
+	/* FDI must be feeding us bits for PCH ports */
+	assert_fdi_tx_enabled(dev_priv, cpu_transcoder);
+	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
+
+	/* Workaround: set timing override bit. */
+	val = I915_READ(_TRANSA_CHICKEN2);
+	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+	I915_WRITE(_TRANSA_CHICKEN2, val);
+
+	val = TRANS_ENABLE;
+	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
+
+	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
+	    PIPECONF_INTERLACED_ILK)
+		val |= TRANS_INTERLACED;
+	else
+		val |= TRANS_PROGRESSIVE;
+
+	I915_WRITE(TRANSCONF(TRANSCODER_A), val);
+	if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100))
+		DRM_ERROR("Failed to enable PCH transcoder\n");
+}
+
+static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
+					    enum pipe pipe)
+{
+	struct drm_device *dev = dev_priv->dev;
+	uint32_t reg, val;
 
 	/* FDI relies on the transcoder */
 	assert_fdi_tx_disabled(dev_priv, pipe);
@@ -1716,6 +1770,31 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
 	/* wait for PCH transcoder off, transcoder state */
 	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
 		DRM_ERROR("failed to disable transcoder %d\n", pipe);
+
+	if (!HAS_PCH_IBX(dev)) {
+		/* Workaround: Clear the timing override chicken bit again. */
+		reg = TRANS_CHICKEN2(pipe);
+		val = I915_READ(reg);
+		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+		I915_WRITE(reg, val);
+	}
+}
+
+static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+{
+	u32 val;
+
+	val = I915_READ(_TRANSACONF);
+	val &= ~TRANS_ENABLE;
+	I915_WRITE(_TRANSACONF, val);
+	/* wait for PCH transcoder off, transcoder state */
+	if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50))
+		DRM_ERROR("Failed to disable PCH transcoder\n");
+
+	/* Workaround: clear timing override bit. */
+	val = I915_READ(_TRANSA_CHICKEN2);
+	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+	I915_WRITE(_TRANSA_CHICKEN2, val);
 }
 
 /**
@@ -1735,9 +1814,17 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
 			      bool pch_port)
 {
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
+	enum transcoder pch_transcoder;
 	int reg;
 	u32 val;
 
+	if (IS_HASWELL(dev_priv->dev))
+		pch_transcoder = TRANSCODER_A;
+	else
+		pch_transcoder = pipe;
+
 	/*
 	 * A pipe without a PLL won't actually be able to drive bits from
 	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
@@ -1748,13 +1835,13 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
 	else {
 		if (pch_port) {
 			/* if driving the PCH, we need FDI enabled */
-			assert_fdi_rx_pll_enabled(dev_priv, pipe);
-			assert_fdi_tx_pll_enabled(dev_priv, pipe);
+			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
+			assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
 		}
 		/* FIXME: assert CPU port conditions for SNB+ */
 	}
 
-	reg = PIPECONF(pipe);
+	reg = PIPECONF(cpu_transcoder);
 	val = I915_READ(reg);
 	if (val & PIPECONF_ENABLE)
 		return;
@@ -1778,6 +1865,8 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
 static void intel_disable_pipe(struct drm_i915_private *dev_priv,
 			       enum pipe pipe)
 {
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
 	int reg;
 	u32 val;
 
@@ -1791,7 +1880,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
 	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
 		return;
 
-	reg = PIPECONF(pipe);
+	reg = PIPECONF(cpu_transcoder);
 	val = I915_READ(reg);
 	if ((val & PIPECONF_ENABLE) == 0)
 		return;
@@ -1807,8 +1896,10 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
 void intel_flush_display_plane(struct drm_i915_private *dev_priv,
 				      enum plane plane)
 {
-	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
-	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
+	if (dev_priv->info->gen >= 4)
+		I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
+	else
+		I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
 }
 
 /**
@@ -1926,9 +2017,9 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
 
 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
  * is assumed to be a power-of-two. */
-static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y,
-							unsigned int bpp,
-							unsigned int pitch)
+unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
+					       unsigned int bpp,
+					       unsigned int pitch)
 {
 	int tile_rows, tiles;
 
@@ -1969,24 +2060,38 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	dspcntr = I915_READ(reg);
 	/* Mask out pixel format bits in case we change it */
 	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
-	switch (fb->bits_per_pixel) {
-	case 8:
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_C8:
 		dspcntr |= DISPPLANE_8BPP;
 		break;
-	case 16:
-		if (fb->depth == 15)
-			dspcntr |= DISPPLANE_15_16BPP;
-		else
-			dspcntr |= DISPPLANE_16BPP;
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_ARGB1555:
+		dspcntr |= DISPPLANE_BGRX555;
 		break;
-	case 24:
-	case 32:
-		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+	case DRM_FORMAT_RGB565:
+		dspcntr |= DISPPLANE_BGRX565;
+		break;
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		dspcntr |= DISPPLANE_BGRX888;
+		break;
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
+		dspcntr |= DISPPLANE_RGBX888;
+		break;
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_ARGB2101010:
+		dspcntr |= DISPPLANE_BGRX101010;
+		break;
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ABGR2101010:
+		dspcntr |= DISPPLANE_RGBX101010;
 		break;
 	default:
-		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
+		DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
 		return -EINVAL;
 	}
+
 	if (INTEL_INFO(dev)->gen >= 4) {
 		if (obj->tiling_mode != I915_TILING_NONE)
 			dspcntr |= DISPPLANE_TILED;
@@ -2000,9 +2105,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
 	if (INTEL_INFO(dev)->gen >= 4) {
 		intel_crtc->dspaddr_offset =
-			gen4_compute_dspaddr_offset_xtiled(&x, &y,
-							   fb->bits_per_pixel / 8,
-							   fb->pitches[0]);
+			intel_gen4_compute_offset_xtiled(&x, &y,
+							 fb->bits_per_pixel / 8,
+							 fb->pitches[0]);
 		linear_offset -= intel_crtc->dspaddr_offset;
 	} else {
 		intel_crtc->dspaddr_offset = linear_offset;
@@ -2053,27 +2158,31 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
 	dspcntr = I915_READ(reg);
 	/* Mask out pixel format bits in case we change it */
 	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
-	switch (fb->bits_per_pixel) {
-	case 8:
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_C8:
 		dspcntr |= DISPPLANE_8BPP;
 		break;
-	case 16:
-		if (fb->depth != 16)
-			return -EINVAL;
-
-		dspcntr |= DISPPLANE_16BPP;
+	case DRM_FORMAT_RGB565:
+		dspcntr |= DISPPLANE_BGRX565;
 		break;
-	case 24:
-	case 32:
-		if (fb->depth == 24)
-			dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
-		else if (fb->depth == 30)
-			dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
-		else
-			return -EINVAL;
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		dspcntr |= DISPPLANE_BGRX888;
+		break;
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
+		dspcntr |= DISPPLANE_RGBX888;
+		break;
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_ARGB2101010:
+		dspcntr |= DISPPLANE_BGRX101010;
+		break;
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ABGR2101010:
+		dspcntr |= DISPPLANE_RGBX101010;
 		break;
 	default:
-		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
+		DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
 		return -EINVAL;
 	}
 
@@ -2089,9 +2198,9 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
 
 	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
 	intel_crtc->dspaddr_offset =
-		gen4_compute_dspaddr_offset_xtiled(&x, &y,
-						   fb->bits_per_pixel / 8,
-						   fb->pitches[0]);
+		intel_gen4_compute_offset_xtiled(&x, &y,
+						 fb->bits_per_pixel / 8,
+						 fb->pitches[0]);
 	linear_offset -= intel_crtc->dspaddr_offset;
 
 	DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
@@ -2099,8 +2208,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
 	I915_MODIFY_DISPBASE(DSPSURF(plane),
 			     obj->gtt_offset + intel_crtc->dspaddr_offset);
-	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
-	I915_WRITE(DSPLINOFF(plane), linear_offset);
+	if (IS_HASWELL(dev)) {
+		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
+	} else {
+		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+		I915_WRITE(DSPLINOFF(plane), linear_offset);
+	}
 	POSTING_READ(reg);
 
 	return 0;
@@ -2148,13 +2261,39 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
 	return ret;
 }
 
+static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_master_private *master_priv;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	if (!dev->primary->master)
+		return;
+
+	master_priv = dev->primary->master->driver_priv;
+	if (!master_priv->sarea_priv)
+		return;
+
+	switch (intel_crtc->pipe) {
+	case 0:
+		master_priv->sarea_priv->pipeA_x = x;
+		master_priv->sarea_priv->pipeA_y = y;
+		break;
+	case 1:
+		master_priv->sarea_priv->pipeB_x = x;
+		master_priv->sarea_priv->pipeB_y = y;
+		break;
+	default:
+		break;
+	}
+}
+
 static int
 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		    struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_master_private *master_priv;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_framebuffer *old_fb;
 	int ret;
@@ -2206,20 +2345,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	intel_update_fbc(dev);
 	mutex_unlock(&dev->struct_mutex);
 
-	if (!dev->primary->master)
-		return 0;
-
-	master_priv = dev->primary->master->driver_priv;
-	if (!master_priv->sarea_priv)
-		return 0;
-
-	if (intel_crtc->pipe) {
-		master_priv->sarea_priv->pipeB_x = x;
-		master_priv->sarea_priv->pipeB_y = y;
-	} else {
-		master_priv->sarea_priv->pipeA_x = x;
-		master_priv->sarea_priv->pipeA_y = y;
-	}
+	intel_crtc_update_sarea_pos(crtc, x, y);
 
 	return 0;
 }
@@ -2302,16 +2428,27 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
 			   FDI_FE_ERRC_ENABLE);
 }
 
-static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
+static void ivb_modeset_global_resources(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 flags = I915_READ(SOUTH_CHICKEN1);
+	struct intel_crtc *pipe_B_crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+	struct intel_crtc *pipe_C_crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
+	uint32_t temp;
 
-	flags |= FDI_PHASE_SYNC_OVR(pipe);
-	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
-	flags |= FDI_PHASE_SYNC_EN(pipe);
-	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
-	POSTING_READ(SOUTH_CHICKEN1);
+	/* When everything is off disable fdi C so that we can enable fdi B
+	 * with all lanes. XXX: This misses the case where a pipe is not using
+	 * any pch resources and so doesn't need any fdi lanes. */
+	if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
+		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+		temp = I915_READ(SOUTH_CHICKEN1);
+		temp &= ~FDI_BC_BIFURCATION_SELECT;
+		DRM_DEBUG_KMS("disabling fdi C rx\n");
+		I915_WRITE(SOUTH_CHICKEN1, temp);
+	}
 }
 
 /* The FDI link training functions for ILK/Ibexpeak. */
@@ -2357,11 +2494,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 	udelay(150);
 
 	/* Ironlake workaround, enable clock pointer after FDI enable*/
-	if (HAS_PCH_IBX(dev)) {
-		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
-		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
-			   FDI_RX_PHASE_SYNC_POINTER_EN);
-	}
+	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+		   FDI_RX_PHASE_SYNC_POINTER_EN);
 
 	reg = FDI_RX_IIR(pipe);
 	for (tries = 0; tries < 5; tries++) {
@@ -2450,6 +2585,9 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
 
+	I915_WRITE(FDI_RX_MISC(pipe),
+		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
 	reg = FDI_RX_CTL(pipe);
 	temp = I915_READ(reg);
 	if (HAS_PCH_CPT(dev)) {
@@ -2464,9 +2602,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
 	POSTING_READ(reg);
 	udelay(150);
 
-	if (HAS_PCH_CPT(dev))
-		cpt_phase_pointer_enable(dev, pipe);
-
 	for (i = 0; i < 4; i++) {
 		reg = FDI_TX_CTL(pipe);
 		temp = I915_READ(reg);
@@ -2570,6 +2705,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
 	POSTING_READ(reg);
 	udelay(150);
 
+	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
+		      I915_READ(FDI_RX_IIR(pipe)));
+
 	/* enable CPU FDI TX and PCH FDI RX */
 	reg = FDI_TX_CTL(pipe);
 	temp = I915_READ(reg);
@@ -2582,6 +2720,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
 	temp |= FDI_COMPOSITE_SYNC;
 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
 
+	I915_WRITE(FDI_RX_MISC(pipe),
+		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
 	reg = FDI_RX_CTL(pipe);
 	temp = I915_READ(reg);
 	temp &= ~FDI_LINK_TRAIN_AUTO;
@@ -2593,9 +2734,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
 	POSTING_READ(reg);
 	udelay(150);
 
-	if (HAS_PCH_CPT(dev))
-		cpt_phase_pointer_enable(dev, pipe);
-
 	for (i = 0; i < 4; i++) {
 		reg = FDI_TX_CTL(pipe);
 		temp = I915_READ(reg);
@@ -2613,7 +2751,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
 		if (temp & FDI_RX_BIT_LOCK ||
 		    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
 			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
-			DRM_DEBUG_KMS("FDI train 1 done.\n");
+			DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
 			break;
 		}
 	}
@@ -2654,7 +2792,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
 
 		if (temp & FDI_RX_SYMBOL_LOCK) {
 			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
-			DRM_DEBUG_KMS("FDI train 2 done.\n");
+			DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
 			break;
 		}
 	}
@@ -2671,9 +2809,6 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
 	int pipe = intel_crtc->pipe;
 	u32 reg, temp;
 
-	/* Write the TU size bits so error detection works */
-	I915_WRITE(FDI_RX_TUSIZE1(pipe),
-		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
 
 	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
 	reg = FDI_RX_CTL(pipe);
@@ -2737,17 +2872,6 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
 	udelay(100);
 }
 
-static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 flags = I915_READ(SOUTH_CHICKEN1);
-
-	flags &= ~(FDI_PHASE_SYNC_EN(pipe));
-	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
-	flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
-	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
-	POSTING_READ(SOUTH_CHICKEN1);
-}
 static void ironlake_fdi_disable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -2774,11 +2898,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
 	/* Ironlake workaround, disable clock pointer after downing FDI */
 	if (HAS_PCH_IBX(dev)) {
 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
-		I915_WRITE(FDI_RX_CHICKEN(pipe),
-			   I915_READ(FDI_RX_CHICKEN(pipe) &
-				     ~FDI_RX_PHASE_SYNC_POINTER_EN));
-	} else if (HAS_PCH_CPT(dev)) {
-		cpt_phase_pointer_disable(dev, pipe);
 	}
 
 	/* still set train pattern 1 */
@@ -2839,7 +2958,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 	mutex_unlock(&dev->struct_mutex);
 }
 
-static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct intel_encoder *intel_encoder;
@@ -2849,23 +2968,6 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
 	 * must be driven by its own crtc; no sharing is possible.
 	 */
 	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
-
-		/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
-		 * CPU handles all others */
-		if (IS_HASWELL(dev)) {
-			/* It is still unclear how this will work on PPT, so throw up a warning */
-			WARN_ON(!HAS_PCH_LPT(dev));
-
-			if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
-				DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
-				return true;
-			} else {
-				DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
-					      intel_encoder->type);
-				return false;
-			}
-		}
-
 		switch (intel_encoder->type) {
 		case INTEL_OUTPUT_EDP:
 			if (!intel_encoder_is_pch_edp(&intel_encoder->base))
@@ -2877,6 +2979,11 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
 	return true;
 }
 
+static bool haswell_crtc_driving_pch(struct drm_crtc *crtc)
+{
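+	/* On Haswell only the analog (VGA) output goes through the LPT PCH;
+	 * everything else is driven directly by the CPU DDI ports. */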
+	return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
+}
+
 /* Program iCLKIP clock to the desired frequency */
 static void lpt_program_iclkip(struct drm_crtc *crtc)
 {
@@ -2892,8 +2999,9 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
 
 	/* Disable SSCCTL */
 	intel_sbi_write(dev_priv, SBI_SSCCTL6,
-				intel_sbi_read(dev_priv, SBI_SSCCTL6) |
-					SBI_SSCCTL_DISABLE);
+			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
+				SBI_SSCCTL_DISABLE,
+			SBI_ICLK);
 
 	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
 	if (crtc->mode.clock == 20000) {
@@ -2934,33 +3042,25 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
 			phaseinc);
 
 	/* Program SSCDIVINTPHASE6 */
-	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
+	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
 	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
 	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
 	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
 	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
 	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
 	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
-
-	intel_sbi_write(dev_priv,
-			SBI_SSCDIVINTPHASE6,
-			temp);
+	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
 
 	/* Program SSCAUXDIV */
-	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
+	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
 	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
 	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
-	intel_sbi_write(dev_priv,
-			SBI_SSCAUXDIV6,
-			temp);
-
+	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
 
 	/* Enable modulator and associated divider */
-	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
+	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
 	temp &= ~SBI_SSCCTL_DISABLE;
-	intel_sbi_write(dev_priv,
-			SBI_SSCCTL6,
-			temp);
+	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
 
 	/* Wait for initialization time */
 	udelay(24);
@@ -2986,15 +3086,24 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 
 	assert_transcoder_disabled(dev_priv, pipe);
 
+	/* Write the TU size bits before fdi link training, so that error
+	 * detection works. */
+	I915_WRITE(FDI_RX_TUSIZE1(pipe),
+		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
 	/* For PCH output, training FDI link */
 	dev_priv->display.fdi_link_train(crtc);
 
-	intel_enable_pch_pll(intel_crtc);
+	/* XXX: pch pll's can be enabled any time before we enable the PCH
+	 * transcoder, and we actually should do this to not upset any PCH
+	 * transcoder that already uses the clock when we share it.
+	 *
+	 * Note that enable_pch_pll tries to do the right thing, but get_pch_pll
+	 * unconditionally resets the pll - we need that to have the right LVDS
+	 * enable sequence. */
+	ironlake_enable_pch_pll(intel_crtc);
 
-	if (HAS_PCH_LPT(dev)) {
-		DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
-		lpt_program_iclkip(crtc);
-	} else if (HAS_PCH_CPT(dev)) {
+	if (HAS_PCH_CPT(dev)) {
 		u32 sel;
 
 		temp = I915_READ(PCH_DPLL_SEL);
@@ -3031,8 +3140,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
 	I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
 
-	if (!IS_HASWELL(dev))
-		intel_fdi_normal_train(crtc);
+	intel_fdi_normal_train(crtc);
 
 	/* For PCH DP, enable TRANS_DP_CTL */
 	if (HAS_PCH_CPT(dev) &&
@@ -3064,15 +3172,37 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 			temp |= TRANS_DP_PORT_SEL_D;
 			break;
 		default:
-			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
-			temp |= TRANS_DP_PORT_SEL_B;
-			break;
+			BUG();
 		}
 
 		I915_WRITE(reg, temp);
 	}
 
-	intel_enable_transcoder(dev_priv, pipe);
+	ironlake_enable_pch_transcoder(dev_priv, pipe);
+}
+
+static void lpt_pch_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+	assert_transcoder_disabled(dev_priv, TRANSCODER_A);
+
+	lpt_program_iclkip(crtc);
+
+	/* Set transcoder timing. */
+	I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
+	I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
+	I915_WRITE(_TRANS_HSYNC_A,  I915_READ(HSYNC(cpu_transcoder)));
+
+	I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
+	I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
+	I915_WRITE(_TRANS_VSYNC_A,  I915_READ(VSYNC(cpu_transcoder)));
+	I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
+
+	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
 }
 
 static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
@@ -3165,16 +3295,12 @@ prepare: /* separate function? */
 void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
+	int dslreg = PIPEDSL(pipe);
 	u32 temp;
 
 	temp = I915_READ(dslreg);
 	udelay(500);
 	if (wait_for(I915_READ(dslreg) != temp, 5)) {
-		/* Without this, mode sets may fail silently on FDI */
-		I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
-		udelay(250);
-		I915_WRITE(tc2reg, 0);
 		if (wait_for(I915_READ(dslreg) != temp, 5))
 			DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
 	}
@@ -3205,9 +3331,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
 	}
 
-	is_pch_port = intel_crtc_driving_pch(crtc);
+	is_pch_port = ironlake_crtc_driving_pch(crtc);
 
 	if (is_pch_port) {
+		/* Note: FDI PLL enabling _must_ be done before we enable the
+		 * cpu pipes, hence this is separate from all the other fdi/pch
+		 * enabling. */
 		ironlake_fdi_pll_enable(intel_crtc);
 	} else {
 		assert_fdi_tx_disabled(dev_priv, pipe);
@@ -3220,12 +3349,17 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 
 	/* Enable panel fitting for LVDS */
 	if (dev_priv->pch_pf_size &&
-	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
+	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
 		/* Force use of hard-coded filter coefficients
 		 * as some pre-programmed values are broken,
 		 * e.g. x201.
 		 */
-		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+		if (IS_IVYBRIDGE(dev))
+			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+						 PF_PIPE_SEL_IVB(pipe));
+		else
+			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
 		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
 		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
 	}
@@ -3265,6 +3399,83 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
 }
 
+static void haswell_crtc_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+	bool is_pch_port;
+
+	WARN_ON(!crtc->enabled);
+
+	if (intel_crtc->active)
+		return;
+
+	intel_crtc->active = true;
+	intel_update_watermarks(dev);
+
+	is_pch_port = haswell_crtc_driving_pch(crtc);
+
+	if (is_pch_port)
+		dev_priv->display.fdi_link_train(crtc);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->pre_enable)
+			encoder->pre_enable(encoder);
+
+	intel_ddi_enable_pipe_clock(intel_crtc);
+
+	/* Enable panel fitting for eDP */
+	if (dev_priv->pch_pf_size &&
+	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+		/* Force use of hard-coded filter coefficients
+		 * as some pre-programmed values are broken,
+		 * e.g. x201.
+		 */
+		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+					 PF_PIPE_SEL_IVB(pipe));
+		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
+		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
+	}
+
+	/*
+	 * On ILK+ LUT must be loaded before the pipe is running but with
+	 * clocks enabled
+	 */
+	intel_crtc_load_lut(crtc);
+
+	intel_ddi_set_pipe_settings(crtc);
+	intel_ddi_enable_pipe_func(crtc);
+
+	intel_enable_pipe(dev_priv, pipe, is_pch_port);
+	intel_enable_plane(dev_priv, plane, pipe);
+
+	if (is_pch_port)
+		lpt_pch_enable(crtc);
+
+	mutex_lock(&dev->struct_mutex);
+	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	intel_crtc_update_cursor(crtc, true);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->enable(encoder);
+
+	/*
+	 * There seems to be a race in PCH platform hw (at least on some
+	 * outputs) where an enabled pipe still completes any pageflip right
+	 * away (as if the pipe is off) instead of waiting for vblank. As soon
+	 * as the first vblank happened, everything works as expected. Hence just
+	 * wait for one vblank before returning to avoid strange things
+	 * happening.
+	 */
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
+}
+
 static void ironlake_crtc_disable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -3303,7 +3514,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 
 	ironlake_fdi_disable(crtc);
 
-	intel_disable_transcoder(dev_priv, pipe);
+	ironlake_disable_pch_transcoder(dev_priv, pipe);
 
 	if (HAS_PCH_CPT(dev)) {
 		/* disable TRANS_DP_CTL */
@@ -3345,12 +3556,78 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	mutex_unlock(&dev->struct_mutex);
 }
 
+static void haswell_crtc_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+	bool is_pch_port;
+
+	if (!intel_crtc->active)
+		return;
+
+	is_pch_port = haswell_crtc_driving_pch(crtc);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->disable(encoder);
+
+	intel_crtc_wait_for_pending_flips(crtc);
+	drm_vblank_off(dev, pipe);
+	intel_crtc_update_cursor(crtc, false);
+
+	intel_disable_plane(dev_priv, plane, pipe);
+
+	if (dev_priv->cfb_plane == plane)
+		intel_disable_fbc(dev);
+
+	intel_disable_pipe(dev_priv, pipe);
+
+	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+
+	/* Disable PF */
+	I915_WRITE(PF_CTL(pipe), 0);
+	I915_WRITE(PF_WIN_SZ(pipe), 0);
+
+	intel_ddi_disable_pipe_clock(intel_crtc);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->post_disable)
+			encoder->post_disable(encoder);
+
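+	/* Only pipes routed to the PCH have an LPT transcoder and FDI link
+	 * to shut down.
+	 */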
+	if (is_pch_port) {
+		lpt_disable_pch_transcoder(dev_priv);
+		intel_ddi_fdi_disable(crtc);
+	}
+
+	intel_crtc->active = false;
+	intel_update_watermarks(dev);
+
+	mutex_lock(&dev->struct_mutex);
+	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
+}
+
 static void ironlake_crtc_off(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	intel_put_pch_pll(intel_crtc);
 }
 
+static void haswell_crtc_off(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	/* Stop saying we're using TRANSCODER_EDP because some other CRTC might
+	 * start using it. */
+	intel_crtc->cpu_transcoder = intel_crtc->pipe;
+
+	intel_ddi_put_crtc_pll(crtc);
+}
+
 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
 {
 	if (!enable && intel_crtc->overlay) {
@@ -4061,7 +4338,7 @@ static void vlv_update_pll(struct drm_crtc *crtc,
 			   struct drm_display_mode *mode,
 			   struct drm_display_mode *adjusted_mode,
 			   intel_clock_t *clock, intel_clock_t *reduced_clock,
-			   int refclk, int num_connectors)
+			   int num_connectors)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4069,9 +4346,19 @@ static void vlv_update_pll(struct drm_crtc *crtc,
 	int pipe = intel_crtc->pipe;
 	u32 dpll, mdiv, pdiv;
 	u32 bestn, bestm1, bestm2, bestp1, bestp2;
-	bool is_hdmi;
+	bool is_sdvo;
+	u32 temp;
 
-	is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+	is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
+		intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+
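+	/* Enable the DPIO clock input first: the divider and lane registers
+	 * below are programmed through the DPIO interface.
+	 */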
+	dpll = DPLL_VGA_MODE_DIS;
+	dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
+	dpll |= DPLL_REFA_CLK_ENABLE_VLV;
+	dpll |= DPLL_INTEGRATED_CLOCK_VLV;
+
+	I915_WRITE(DPLL(pipe), dpll);
+	POSTING_READ(DPLL(pipe));
 
 	bestn = clock->n;
 	bestm1 = clock->m1;
@@ -4079,12 +4366,10 @@ static void vlv_update_pll(struct drm_crtc *crtc,
 	bestp1 = clock->p1;
 	bestp2 = clock->p2;
 
-	/* Enable DPIO clock input */
-	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
-		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
-	I915_WRITE(DPLL(pipe), dpll);
-	POSTING_READ(DPLL(pipe));
-
+	/*
+	 * On Valleyview the PLL and the lane counter registers are programmed
+	 * through the DPIO interface.
+	 */
 	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
 	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
 	mdiv |= ((bestn << DPIO_N_SHIFT));
@@ -4095,12 +4380,13 @@ static void vlv_update_pll(struct drm_crtc *crtc,
 
 	intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
 
-	pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) |
+	pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
 		(3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
-		(8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT);
+		(7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
+		(5 << DPIO_CLK_BIAS_CTL_SHIFT);
 	intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
 
-	intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051);
+	intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
 
 	dpll |= DPLL_VCO_ENABLE;
 	I915_WRITE(DPLL(pipe), dpll);
@@ -4108,19 +4394,44 @@ static void vlv_update_pll(struct drm_crtc *crtc,
 	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
 		DRM_ERROR("DPLL %d failed to lock\n", pipe);
 
-	if (is_hdmi) {
-		u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+	intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
 
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+		intel_dp_set_m_n(crtc, mode, adjusted_mode);
+
+	I915_WRITE(DPLL(pipe), dpll);
+
+	/* Wait for the clocks to stabilize. */
+	POSTING_READ(DPLL(pipe));
+	udelay(150);
+
+	temp = 0;
+	if (is_sdvo) {
+		temp = intel_mode_get_pixel_multiplier(adjusted_mode);
 		if (temp > 1)
 			temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
 		else
 			temp = 0;
-
-		I915_WRITE(DPLL_MD(pipe), temp);
-		POSTING_READ(DPLL_MD(pipe));
 	}
+	I915_WRITE(DPLL_MD(pipe), temp);
+	POSTING_READ(DPLL_MD(pipe));
 
-	intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */
+	/* Now program lane control registers */
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+		temp = 0x1000C4;
+		if (pipe == 1)
+			temp |= (1 << 21);
+		intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
+	}
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+		temp = 0x1000C4;
+		if (pipe == 1)
+			temp |= (1 << 21);
+		intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
+	}
 }
 
 static void i9xx_update_pll(struct drm_crtc *crtc,
@@ -4136,6 +4447,8 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
 	u32 dpll;
 	bool is_sdvo;
 
+	i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+
 	is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
 		intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
 
@@ -4236,7 +4549,7 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
 
 static void i8xx_update_pll(struct drm_crtc *crtc,
 			    struct drm_display_mode *adjusted_mode,
-			    intel_clock_t *clock,
+			    intel_clock_t *clock, intel_clock_t *reduced_clock,
 			    int num_connectors)
 {
 	struct drm_device *dev = crtc->dev;
@@ -4245,6 +4558,8 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
 	int pipe = intel_crtc->pipe;
 	u32 dpll;
 
+	i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+
 	dpll = DPLL_VGA_MODE_DIS;
 
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -4294,6 +4609,64 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
 	I915_WRITE(DPLL(pipe), dpll);
 }
 
+static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = intel_crtc->pipe;
+	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+	uint32_t vsyncshift;
+
+	if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		/* the chip adds 2 halflines automatically */
+		adjusted_mode->crtc_vtotal -= 1;
+		adjusted_mode->crtc_vblank_end -= 1;
+		vsyncshift = adjusted_mode->crtc_hsync_start
+			     - adjusted_mode->crtc_htotal / 2;
+	} else {
+		vsyncshift = 0;
+	}
+
+	if (INTEL_INFO(dev)->gen > 3)
+		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
+
+	I915_WRITE(HTOTAL(cpu_transcoder),
+		   (adjusted_mode->crtc_hdisplay - 1) |
+		   ((adjusted_mode->crtc_htotal - 1) << 16));
+	I915_WRITE(HBLANK(cpu_transcoder),
+		   (adjusted_mode->crtc_hblank_start - 1) |
+		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
+	I915_WRITE(HSYNC(cpu_transcoder),
+		   (adjusted_mode->crtc_hsync_start - 1) |
+		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+	I915_WRITE(VTOTAL(cpu_transcoder),
+		   (adjusted_mode->crtc_vdisplay - 1) |
+		   ((adjusted_mode->crtc_vtotal - 1) << 16));
+	I915_WRITE(VBLANK(cpu_transcoder),
+		   (adjusted_mode->crtc_vblank_start - 1) |
+		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
+	I915_WRITE(VSYNC(cpu_transcoder),
+		   (adjusted_mode->crtc_vsync_start - 1) |
+		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
+
+	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
+	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
+	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
+	 * bits. */
+	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
+	    (pipe == PIPE_B || pipe == PIPE_C))
+		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
+
+	/* pipesrc controls the size that is scaled from, which should
+	 * always be the user's requested size.
+	 */
+	I915_WRITE(PIPESRC(pipe),
+		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+}
+
 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 			      struct drm_display_mode *mode,
 			      struct drm_display_mode *adjusted_mode,
@@ -4307,7 +4680,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 	int plane = intel_crtc->plane;
 	int refclk, num_connectors = 0;
 	intel_clock_t clock, reduced_clock;
-	u32 dspcntr, pipeconf, vsyncshift;
+	u32 dspcntr, pipeconf;
 	bool ok, has_reduced_clock = false, is_sdvo = false;
 	bool is_lvds = false, is_tv = false, is_dp = false;
 	struct intel_encoder *encoder;
@@ -4371,14 +4744,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 	if (is_sdvo && is_tv)
 		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
 
-	i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
-				 &reduced_clock : NULL);
-
 	if (IS_GEN2(dev))
-		i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
+		i8xx_update_pll(crtc, adjusted_mode, &clock,
+				has_reduced_clock ? &reduced_clock : NULL,
+				num_connectors);
 	else if (IS_VALLEYVIEW(dev))
-		vlv_update_pll(crtc, mode,adjusted_mode, &clock, NULL,
-			       refclk, num_connectors);
+		vlv_update_pll(crtc, mode, adjusted_mode, &clock,
+				has_reduced_clock ? &reduced_clock : NULL,
+				num_connectors);
 	else
 		i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
 				has_reduced_clock ? &reduced_clock : NULL,
@@ -4419,6 +4792,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 		}
 	}
 
+	if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
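+		/* Honour a 6bpc override requested by the DP link code. */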
+		if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+			pipeconf |= PIPECONF_BPP_6 |
+					PIPECONF_ENABLE |
+					I965_PIPECONF_ACTIVE;
+		}
+	}
+
 	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
 	drm_mode_debug_printmodeline(mode);
 
@@ -4434,40 +4815,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 
 	pipeconf &= ~PIPECONF_INTERLACE_MASK;
 	if (!IS_GEN2(dev) &&
-	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
 		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
-		/* the chip adds 2 halflines automatically */
-		adjusted_mode->crtc_vtotal -= 1;
-		adjusted_mode->crtc_vblank_end -= 1;
-		vsyncshift = adjusted_mode->crtc_hsync_start
-			     - adjusted_mode->crtc_htotal/2;
-	} else {
+	else
 		pipeconf |= PIPECONF_PROGRESSIVE;
-		vsyncshift = 0;
-	}
 
-	if (!IS_GEN3(dev))
-		I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
-
-	I915_WRITE(HTOTAL(pipe),
-		   (adjusted_mode->crtc_hdisplay - 1) |
-		   ((adjusted_mode->crtc_htotal - 1) << 16));
-	I915_WRITE(HBLANK(pipe),
-		   (adjusted_mode->crtc_hblank_start - 1) |
-		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
-	I915_WRITE(HSYNC(pipe),
-		   (adjusted_mode->crtc_hsync_start - 1) |
-		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
-	I915_WRITE(VTOTAL(pipe),
-		   (adjusted_mode->crtc_vdisplay - 1) |
-		   ((adjusted_mode->crtc_vtotal - 1) << 16));
-	I915_WRITE(VBLANK(pipe),
-		   (adjusted_mode->crtc_vblank_start - 1) |
-		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
-	I915_WRITE(VSYNC(pipe),
-		   (adjusted_mode->crtc_vsync_start - 1) |
-		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
+	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
 
 	/* pipesrc and dspsize control the size that is scaled from,
 	 * which should always be the user's requested size.
@@ -4476,8 +4829,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 		   ((mode->vdisplay - 1) << 16) |
 		   (mode->hdisplay - 1));
 	I915_WRITE(DSPPOS(plane), 0);
-	I915_WRITE(PIPESRC(pipe),
-		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
 
 	I915_WRITE(PIPECONF(pipe), pipeconf);
 	POSTING_READ(PIPECONF(pipe));
@@ -4495,10 +4846,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 	return ret;
 }
 
-/*
- * Initialize reference clocks when the driver loads
- */
-void ironlake_init_pch_refclk(struct drm_device *dev)
+static void ironlake_init_pch_refclk(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_mode_config *mode_config = &dev->mode_config;
@@ -4612,6 +4960,182 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
 	}
 }
 
+/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
+static void lpt_init_pch_refclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *encoder;
+	bool has_vga = false;
+	bool is_sdv = false;
+	u32 tmp;
+
+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+		switch (encoder->type) {
+		case INTEL_OUTPUT_ANALOG:
+			has_vga = true;
+			break;
+		}
+	}
+
+	if (!has_vga)
+		return;
+
+	/* XXX: Rip out SDV support once Haswell ships for real. */
+	if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
+		is_sdv = true;
+
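+	/* Bring up the SSC with the output path initially parked (PATHALT)
+	 * and release it once programmed. The raw SBI offsets poked below
+	 * are mPHY registers that have no symbolic names yet.
+	 */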
+	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+	tmp &= ~SBI_SSCCTL_DISABLE;
+	tmp |= SBI_SSCCTL_PATHALT;
+	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+	udelay(24);
+
+	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+	tmp &= ~SBI_SSCCTL_PATHALT;
+	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+	if (!is_sdv) {
+		tmp = I915_READ(SOUTH_CHICKEN2);
+		tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+		I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+		if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+				       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+			DRM_ERROR("FDI mPHY reset assert timeout\n");
+
+		tmp = I915_READ(SOUTH_CHICKEN2);
+		tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+		I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+		if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+				        FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
+				       100))
+			DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+	}
+
+	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+	tmp &= ~(0xFF << 24);
+	tmp |= (0x12 << 24);
+	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+	if (!is_sdv) {
+		tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
+		tmp &= ~(0x3 << 6);
+		tmp |= (1 << 6) | (1 << 0);
+		intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
+	}
+
+	if (is_sdv) {
+		tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
+		tmp |= 0x7FFF;
+		intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
+	}
+
+	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+	tmp |= (1 << 11);
+	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+	tmp |= (1 << 11);
+	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+	if (is_sdv) {
+		tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
+		tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+		intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
+		tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+		intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
+		tmp |= (0x3F << 8);
+		intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
+		tmp |= (0x3F << 8);
+		intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
+	}
+
+	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+	if (!is_sdv) {
+		tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+		tmp &= ~(7 << 13);
+		tmp |= (5 << 13);
+		intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+		tmp &= ~(7 << 13);
+		tmp |= (5 << 13);
+		intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+	}
+
+	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+	tmp &= ~0xFF;
+	tmp |= 0x1C;
+	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+	tmp &= ~0xFF;
+	tmp |= 0x1C;
+	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+	tmp &= ~(0xFF << 16);
+	tmp |= (0x1C << 16);
+	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+	tmp &= ~(0xFF << 16);
+	tmp |= (0x1C << 16);
+	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+	if (!is_sdv) {
+		tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+		tmp |= (1 << 27);
+		intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+		tmp |= (1 << 27);
+		intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+		tmp &= ~(0xF << 28);
+		tmp |= (4 << 28);
+		intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+		tmp &= ~(0xF << 28);
+		tmp |= (4 << 28);
+		intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+	}
+
+	/* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
+	tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
+	tmp |= SBI_DBUFF0_ENABLE;
+	intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+}
+
+/*
+ * Initialize reference clocks when the driver loads
+ */
+void intel_init_pch_refclk(struct drm_device *dev)
+{
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+		ironlake_init_pch_refclk(dev);
+	else if (HAS_PCH_LPT(dev))
+		lpt_init_pch_refclk(dev);
+}
+
 static int ironlake_get_refclk(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -4668,8 +5192,8 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
 		val |= PIPE_12BPC;
 		break;
 	default:
-		val |= PIPE_8BPC;
-		break;
+		/* Case prevented by intel_choose_pipe_bpp_dither. */
+		BUG();
 	}
 
 	val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
@@ -4686,6 +5210,31 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
 	POSTING_READ(PIPECONF(pipe));
 }
 
+static void haswell_set_pipeconf(struct drm_crtc *crtc,
+				 struct drm_display_mode *adjusted_mode,
+				 bool dither)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+	uint32_t val;
+
+	val = I915_READ(PIPECONF(cpu_transcoder));
+
+	val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+	if (dither)
+		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+	val &= ~PIPECONF_INTERLACE_MASK_HSW;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+		val |= PIPECONF_INTERLACED_ILK;
+	else
+		val |= PIPECONF_PROGRESSIVE;
+
+	I915_WRITE(PIPECONF(cpu_transcoder), val);
+	POSTING_READ(PIPECONF(cpu_transcoder));
+}
+
 static bool ironlake_compute_clocks(struct drm_crtc *crtc,
 				    struct drm_display_mode *adjusted_mode,
 				    intel_clock_t *clock,
@@ -4749,74 +5298,126 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
 	return true;
 }
 
-static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
-				  struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted_mode,
-				  int x, int y,
-				  struct drm_framebuffer *fb)
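+/* FDI links B and C share a single set of four lanes. Setting the
+ * bifurcation bit splits them 2/2 so that pipes B and C can drive FDI at the
+ * same time; neither FDI RX may be enabled while the bit is toggled.
+ */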
+static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t temp;
+
+	temp = I915_READ(SOUTH_CHICKEN1);
+	if (temp & FDI_BC_BIFURCATION_SELECT)
+		return;
+
+	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+	temp |= FDI_BC_BIFURCATION_SELECT;
+	DRM_DEBUG_KMS("enabling fdi C rx\n");
+	I915_WRITE(SOUTH_CHICKEN1, temp);
+	POSTING_READ(SOUTH_CHICKEN1);
+}
+
+static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *pipe_B_crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+
+	DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
+		      intel_crtc->pipe, intel_crtc->fdi_lanes);
+	if (intel_crtc->fdi_lanes > 4) {
+		DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
+			      intel_crtc->pipe, intel_crtc->fdi_lanes);
+		/* Clamp lanes to avoid programming the hw with bogus values. */
+		intel_crtc->fdi_lanes = 4;
+
+		return false;
+	}
+
+	if (dev_priv->num_pipe == 2)
+		return true;
+
+	switch (intel_crtc->pipe) {
+	case PIPE_A:
+		return true;
+	case PIPE_B:
+		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
+		    intel_crtc->fdi_lanes > 2) {
+			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
+				      intel_crtc->pipe, intel_crtc->fdi_lanes);
+			/* Clamp lanes to avoid programming the hw with bogus values. */
+			intel_crtc->fdi_lanes = 2;
+
+			return false;
+		}
+
+		if (intel_crtc->fdi_lanes > 2)
+			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
+		else
+			cpt_enable_fdi_bc_bifurcation(dev);
+
+		return true;
+	case PIPE_C:
+		if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
+			if (intel_crtc->fdi_lanes > 2) {
+				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
+					      intel_crtc->pipe, intel_crtc->fdi_lanes);
+				/* Clamp lanes to avoid programming the hw with bogus values. */
+				intel_crtc->fdi_lanes = 2;
+
+				return false;
+			}
+		} else {
+			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
+			return false;
+		}
+
+		cpt_enable_fdi_bc_bifurcation(dev);
+
+		return true;
+	default:
+		BUG();
+	}
+}
+
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+{
+	/*
+	 * Account for spread spectrum to avoid
+	 * oversubscribing the link. Max center spread
+	 * is 2.5%; use 5% for safety's sake.
+	 */
+	u32 bps = target_clock * bpp * 21 / 20;
+	return bps / (link_bw * 8) + 1;
+}
+
+static void ironlake_set_m_n(struct drm_crtc *crtc,
+			     struct drm_display_mode *mode,
+			     struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
-	int num_connectors = 0;
-	intel_clock_t clock, reduced_clock;
-	u32 dpll, fp = 0, fp2 = 0;
-	bool ok, has_reduced_clock = false, is_sdvo = false;
-	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
-	struct intel_encoder *encoder, *edp_encoder = NULL;
-	int ret;
+	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+	struct intel_encoder *intel_encoder, *edp_encoder = NULL;
 	struct fdi_m_n m_n = {0};
-	u32 temp;
-	int target_clock, pixel_multiplier, lane, link_bw, factor;
-	unsigned int pipe_bpp;
-	bool dither;
-	bool is_cpu_edp = false, is_pch_edp = false;
+	int target_clock, pixel_multiplier, lane, link_bw;
+	bool is_dp = false, is_cpu_edp = false;
 
-	for_each_encoder_on_crtc(dev, crtc, encoder) {
-		switch (encoder->type) {
-		case INTEL_OUTPUT_LVDS:
-			is_lvds = true;
-			break;
-		case INTEL_OUTPUT_SDVO:
-		case INTEL_OUTPUT_HDMI:
-			is_sdvo = true;
-			if (encoder->needs_tv_clock)
-				is_tv = true;
-			break;
-		case INTEL_OUTPUT_TVOUT:
-			is_tv = true;
-			break;
-		case INTEL_OUTPUT_ANALOG:
-			is_crt = true;
-			break;
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+		switch (intel_encoder->type) {
 		case INTEL_OUTPUT_DISPLAYPORT:
 			is_dp = true;
 			break;
 		case INTEL_OUTPUT_EDP:
 			is_dp = true;
-			if (intel_encoder_is_pch_edp(&encoder->base))
-				is_pch_edp = true;
-			else
+			if (!intel_encoder_is_pch_edp(&intel_encoder->base))
 				is_cpu_edp = true;
-			edp_encoder = encoder;
+			edp_encoder = intel_encoder;
 			break;
 		}
-
-		num_connectors++;
 	}
 
-	ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
-				     &has_reduced_clock, &reduced_clock);
-	if (!ok) {
-		DRM_ERROR("Couldn't find PLL settings for mode!\n");
-		return -EINVAL;
-	}
-
-	/* Ensure that the cursor is valid for the new mode before changing... */
-	intel_crtc_update_cursor(crtc, true);
-
 	/* FDI link */
 	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
 	lane = 0;
@@ -4843,29 +5444,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	else
 		target_clock = adjusted_mode->clock;
 
-	/* determine panel color depth */
-	dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp,
-					      adjusted_mode);
-	if (is_lvds && dev_priv->lvds_dither)
-		dither = true;
-
-	if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
-	    pipe_bpp != 36) {
-		WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
-		     pipe_bpp);
-		pipe_bpp = 24;
-	}
-	intel_crtc->bpp = pipe_bpp;
-
-	if (!lane) {
-		/*
-		 * Account for spread spectrum to avoid
-		 * oversubscribing the link. Max center spread
-		 * is 2.5%; use 5% for safety's sake.
-		 */
-		u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
-		lane = bps / (link_bw * 8) + 1;
-	}
+	if (!lane)
+		lane = ironlake_get_lanes_required(target_clock, link_bw,
+						   intel_crtc->bpp);
 
 	intel_crtc->fdi_lanes = lane;
 
@@ -4874,10 +5455,51 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
 			     &m_n);
 
-	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
-	if (has_reduced_clock)
-		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
-			reduced_clock.m2;
+	I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
+	I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
+	I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
+	I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
+}
+
+static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+				      struct drm_display_mode *adjusted_mode,
+				      intel_clock_t *clock, u32 fp)
+{
+	struct drm_crtc *crtc = &intel_crtc->base;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *intel_encoder;
+	uint32_t dpll;
+	int factor, pixel_multiplier, num_connectors = 0;
+	bool is_lvds = false, is_sdvo = false, is_tv = false;
+	bool is_dp = false, is_cpu_edp = false;
+
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+		switch (intel_encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		case INTEL_OUTPUT_SDVO:
+		case INTEL_OUTPUT_HDMI:
+			is_sdvo = true;
+			if (intel_encoder->needs_tv_clock)
+				is_tv = true;
+			break;
+		case INTEL_OUTPUT_TVOUT:
+			is_tv = true;
+			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+			is_dp = true;
+			break;
+		case INTEL_OUTPUT_EDP:
+			is_dp = true;
+			if (!intel_encoder_is_pch_edp(&intel_encoder->base))
+				is_cpu_edp = true;
+			break;
+		}
+
+		num_connectors++;
+	}
 
 	/* Enable autotuning of the PLL clock (if permissible) */
 	factor = 21;
@@ -4889,7 +5511,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	} else if (is_sdvo && is_tv)
 		factor = 20;
 
-	if (clock.m < factor * clock.n)
+	if (clock->m < factor * clock->n)
 		fp |= FP_CB_TUNE;
 
 	dpll = 0;
@@ -4899,7 +5521,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	else
 		dpll |= DPLLB_MODE_DAC_SERIAL;
 	if (is_sdvo) {
-		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+		pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
 		if (pixel_multiplier > 1) {
 			dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
 		}
@@ -4909,11 +5531,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 		dpll |= DPLL_DVO_HIGH_SPEED;
 
 	/* compute bitmask from p1 value */
-	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 	/* also FPA1 */
-	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 
-	switch (clock.p2) {
+	switch (clock->p2) {
 	case 5:
 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
 		break;
@@ -4939,15 +5561,79 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	else
 		dpll |= PLL_REF_INPUT_DREFCLK;
 
+	return dpll;
+}
+
+static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+				  struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode,
+				  int x, int y,
+				  struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+	int num_connectors = 0;
+	intel_clock_t clock, reduced_clock;
+	u32 dpll, fp = 0, fp2 = 0;
+	bool ok, has_reduced_clock = false;
+	bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+	struct intel_encoder *encoder;
+	u32 temp;
+	int ret;
+	bool dither, fdi_config_ok;
+
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		switch (encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+			is_dp = true;
+			break;
+		case INTEL_OUTPUT_EDP:
+			is_dp = true;
+			if (!intel_encoder_is_pch_edp(&encoder->base))
+				is_cpu_edp = true;
+			break;
+		}
+
+		num_connectors++;
+	}
+
+	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
+	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
+
+	ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+				     &has_reduced_clock, &reduced_clock);
+	if (!ok) {
+		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+		return -EINVAL;
+	}
+
+	/* Ensure that the cursor is valid for the new mode before changing... */
+	intel_crtc_update_cursor(crtc, true);
+
+	/* determine panel color depth */
+	dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
+					      adjusted_mode);
+	if (is_lvds && dev_priv->lvds_dither)
+		dither = true;
+
+	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+	if (has_reduced_clock)
+		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+			reduced_clock.m2;
+
+	dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp);
+
 	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
 	drm_mode_debug_printmodeline(mode);
 
-	/* CPU eDP is the only output that doesn't need a PCH PLL of its own on
-	 * pre-Haswell/LPT generation */
-	if (HAS_PCH_LPT(dev)) {
-		DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
-				pipe);
-	} else if (!is_cpu_edp) {
+	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+	if (!is_cpu_edp) {
 		struct intel_pch_pll *pll;
 
 		pll = intel_get_pch_pll(intel_crtc, dpll, fp);
@@ -5033,47 +5719,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 		}
 	}
 
-	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
-		/* the chip adds 2 halflines automatically */
-		adjusted_mode->crtc_vtotal -= 1;
-		adjusted_mode->crtc_vblank_end -= 1;
-		I915_WRITE(VSYNCSHIFT(pipe),
-			   adjusted_mode->crtc_hsync_start
-			   - adjusted_mode->crtc_htotal/2);
-	} else {
-		I915_WRITE(VSYNCSHIFT(pipe), 0);
-	}
+	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
 
-	I915_WRITE(HTOTAL(pipe),
-		   (adjusted_mode->crtc_hdisplay - 1) |
-		   ((adjusted_mode->crtc_htotal - 1) << 16));
-	I915_WRITE(HBLANK(pipe),
-		   (adjusted_mode->crtc_hblank_start - 1) |
-		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
-	I915_WRITE(HSYNC(pipe),
-		   (adjusted_mode->crtc_hsync_start - 1) |
-		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
+	/* Note: this also computes intel_crtc->fdi_lanes, which is used below in
+	 * ironlake_check_fdi_lanes. */
+	ironlake_set_m_n(crtc, mode, adjusted_mode);
 
-	I915_WRITE(VTOTAL(pipe),
-		   (adjusted_mode->crtc_vdisplay - 1) |
-		   ((adjusted_mode->crtc_vtotal - 1) << 16));
-	I915_WRITE(VBLANK(pipe),
-		   (adjusted_mode->crtc_vblank_start - 1) |
-		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
-	I915_WRITE(VSYNC(pipe),
-		   (adjusted_mode->crtc_vsync_start - 1) |
-		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
-
-	/* pipesrc controls the size that is scaled from, which should
-	 * always be the user's requested size.
-	 */
-	I915_WRITE(PIPESRC(pipe),
-		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
-
-	I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
-	I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
-	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
-	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
+	fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
 
 	if (is_cpu_edp)
 		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
@@ -5092,6 +5744,217 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 
 	intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
 
+	return fdi_config_ok ? ret : -EINVAL;
+}
+
+static int haswell_crtc_mode_set(struct drm_crtc *crtc,
+				 struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode,
+				 int x, int y,
+				 struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+	int num_connectors = 0;
+	intel_clock_t clock, reduced_clock;
+	u32 dpll = 0, fp = 0, fp2 = 0;
+	bool ok, has_reduced_clock = false;
+	bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+	struct intel_encoder *encoder;
+	u32 temp;
+	int ret;
+	bool dither;
+
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		switch (encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+			is_dp = true;
+			break;
+		case INTEL_OUTPUT_EDP:
+			is_dp = true;
+			if (!intel_encoder_is_pch_edp(&encoder->base))
+				is_cpu_edp = true;
+			break;
+		}
+
+		num_connectors++;
+	}
+
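+	/* CPU eDP has its own dedicated transcoder; every other output uses
+	 * the CPU transcoder matching the pipe.
+	 */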
+	if (is_cpu_edp)
+		intel_crtc->cpu_transcoder = TRANSCODER_EDP;
+	else
+		intel_crtc->cpu_transcoder = pipe;
+
+	/* We are not sure yet this won't happen. */
+	WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
+	     INTEL_PCH_TYPE(dev));
+
+	WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
+	     num_connectors, pipe_name(pipe));
+
+	WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) &
+		(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
+
+	WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
+
+	if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
+		return -EINVAL;
+
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+		ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+					     &has_reduced_clock,
+					     &reduced_clock);
+		if (!ok) {
+			DRM_ERROR("Couldn't find PLL settings for mode!\n");
+			return -EINVAL;
+		}
+	}
+
+	/* Ensure that the cursor is valid for the new mode before changing... */
+	intel_crtc_update_cursor(crtc, true);
+
+	/* determine panel color depth */
+	dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
+					      adjusted_mode);
+	if (is_lvds && dev_priv->lvds_dither)
+		dither = true;
+
+	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
+	drm_mode_debug_printmodeline(mode);
+
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+		fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+		if (has_reduced_clock)
+			fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+			      reduced_clock.m2;
+
+		dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
+					     fp);
+
+		/* CPU eDP is the only output that doesn't need a PCH PLL of its
+		 * own on pre-Haswell/LPT generations */
+		if (!is_cpu_edp) {
+			struct intel_pch_pll *pll;
+
+			pll = intel_get_pch_pll(intel_crtc, dpll, fp);
+			if (pll == NULL) {
+				DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
+						 pipe);
+				return -EINVAL;
+			}
+		} else
+			intel_put_pch_pll(intel_crtc);
+
+		/* The LVDS pin pair needs to be on before the DPLLs are
+		 * enabled.  This is an exception to the general rule that
+		 * mode_set doesn't turn things on.
+		 */
+		if (is_lvds) {
+			temp = I915_READ(PCH_LVDS);
+			temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+			if (HAS_PCH_CPT(dev)) {
+				temp &= ~PORT_TRANS_SEL_MASK;
+				temp |= PORT_TRANS_SEL_CPT(pipe);
+			} else {
+				if (pipe == 1)
+					temp |= LVDS_PIPEB_SELECT;
+				else
+					temp &= ~LVDS_PIPEB_SELECT;
+			}
+
+			/* set the corresponding LVDS_BORDER bit */
+			temp |= dev_priv->lvds_border_bits;
+			/* Set the B0-B3 data pairs corresponding to whether
+			 * we're going to set the DPLLs for dual-channel mode or
+			 * not.
+			 */
+			if (clock.p2 == 7)
+				temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+			else
+				temp &= ~(LVDS_B0B3_POWER_UP |
+					  LVDS_CLKB_POWER_UP);
+
+			/* It would be nice to set 24 vs 18-bit mode
+			 * (LVDS_A3_POWER_UP) appropriately here, but we need to
+			 * look more thoroughly into how panels behave in the
+			 * two modes.
+			 */
+			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+			if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+				temp |= LVDS_HSYNC_POLARITY;
+			if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+				temp |= LVDS_VSYNC_POLARITY;
+			I915_WRITE(PCH_LVDS, temp);
+		}
+	}
+
+	if (is_dp && !is_cpu_edp) {
+		intel_dp_set_m_n(crtc, mode, adjusted_mode);
+	} else {
+		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+			/* For non-DP output, clear any trans DP clock recovery
+			 * setting. */
+			I915_WRITE(TRANSDATA_M1(pipe), 0);
+			I915_WRITE(TRANSDATA_N1(pipe), 0);
+			I915_WRITE(TRANSDPLINK_M1(pipe), 0);
+			I915_WRITE(TRANSDPLINK_N1(pipe), 0);
+		}
+	}
+
+	intel_crtc->lowfreq_avail = false;
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+		if (intel_crtc->pch_pll) {
+			I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+
+			/* Wait for the clocks to stabilize. */
+			POSTING_READ(intel_crtc->pch_pll->pll_reg);
+			udelay(150);
+
+			/* The pixel multiplier can only be updated once the
+			 * DPLL is enabled and the clocks are stable.
+			 *
+			 * So write it again.
+			 */
+			I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+		}
+
+		if (intel_crtc->pch_pll) {
+			if (is_lvds && has_reduced_clock && i915_powersave) {
+				I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
+				intel_crtc->lowfreq_avail = true;
+			} else {
+				I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
+			}
+		}
+	}
+
+	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
+
+	if (!is_dp || is_cpu_edp)
+		ironlake_set_m_n(crtc, mode, adjusted_mode);
+
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+		if (is_cpu_edp)
+			ironlake_set_pll_edp(crtc, adjusted_mode->clock);
+
+	haswell_set_pipeconf(crtc, adjusted_mode, dither);
+
+	/* Set up the display plane register */
+	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
+	POSTING_READ(DSPCNTR(plane));
+
+	ret = intel_pipe_set_base(crtc, x, y, fb);
+
+	intel_update_watermarks(dev);
+
+	intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+
 	return ret;
 }
 
@@ -5103,6 +5966,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	struct intel_encoder *encoder;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
 	int ret;
@@ -5113,7 +5978,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 					      x, y, fb);
 	drm_vblank_post_modeset(dev, pipe);
 
-	return ret;
+	if (ret != 0)
+		return ret;
+
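+	/* Call the encoder mode_set hooks only once the CRTC mode_set has
+	 * succeeded; this used to be done from within intel_set_mode.
+	 */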
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+			encoder->base.base.id,
+			drm_get_encoder_name(&encoder->base),
+			mode->base.id, mode->name);
+		encoder_funcs = encoder->base.helper_private;
+		encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
+	}
+
+	return 0;
 }
 
 static bool intel_eld_uptodate(struct drm_connector *connector,
@@ -5749,7 +6626,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
 				  int depth, int bpp)
 {
 	struct drm_i915_gem_object *obj;
-	struct drm_mode_fb_cmd2 mode_cmd;
+	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
 
 	obj = i915_gem_alloc_object(dev,
 				    intel_framebuffer_size_for_mode(mode, bpp));
@@ -5879,24 +6756,19 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
 		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
 	if (IS_ERR(fb)) {
 		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
-		goto fail;
+		return false;
 	}
 
 	if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
 		if (old->release_fb)
 			old->release_fb->funcs->destroy(old->release_fb);
-		goto fail;
+		return false;
 	}
 
 	/* let the connector get through one full cycle before testing */
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
-
 	return true;
-fail:
-	connector->encoder = NULL;
-	encoder->crtc = NULL;
-	return false;
 }
 
 void intel_release_load_detect_pipe(struct drm_connector *connector,
@@ -6021,12 +6893,12 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
+	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
 	struct drm_display_mode *mode;
-	int htot = I915_READ(HTOTAL(pipe));
-	int hsync = I915_READ(HSYNC(pipe));
-	int vtot = I915_READ(VTOTAL(pipe));
-	int vsync = I915_READ(VSYNC(pipe));
+	int htot = I915_READ(HTOTAL(cpu_transcoder));
+	int hsync = I915_READ(HSYNC(cpu_transcoder));
+	int vtot = I915_READ(VTOTAL(cpu_transcoder));
+	int vsync = I915_READ(VSYNC(cpu_transcoder));
 
 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
 	if (!mode)
@@ -6183,14 +7055,19 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 {
 	struct intel_unpin_work *work =
 		container_of(__work, struct intel_unpin_work, work);
+	struct drm_device *dev = work->crtc->dev;
 
-	mutex_lock(&work->dev->struct_mutex);
+	mutex_lock(&dev->struct_mutex);
 	intel_unpin_fb_obj(work->old_fb_obj);
 	drm_gem_object_unreference(&work->pending_flip_obj->base);
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 
-	intel_update_fbc(work->dev);
-	mutex_unlock(&work->dev->struct_mutex);
+	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
+	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+
 	kfree(work);
 }
 
@@ -6201,8 +7078,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
 	struct drm_i915_gem_object *obj;
-	struct drm_pending_vblank_event *e;
-	struct timeval tvbl;
 	unsigned long flags;
 
 	/* Ignore early vblank irqs */
@@ -6211,24 +7086,22 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 	work = intel_crtc->unpin_work;
-	if (work == NULL || !work->pending) {
+
+	/* Ensure we don't miss a work->pending update ... */
+	smp_rmb();
+
+	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		return;
 	}
 
+	/* and that the unpin work is consistent wrt ->pending. */
+	smp_rmb();
+
 	intel_crtc->unpin_work = NULL;
 
-	if (work->event) {
-		e = work->event;
-		e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
-
-		e->event.tv_sec = tvbl.tv_sec;
-		e->event.tv_usec = tvbl.tv_usec;
-
-		list_add_tail(&e->base.link,
-			      &e->base.file_priv->event_list);
-		wake_up_interruptible(&e->base.file_priv->event_wait);
-	}
+	if (work->event)
+		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
 
 	drm_vblank_put(dev, intel_crtc->pipe);
 
@@ -6238,9 +7111,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
 	atomic_clear_mask(1 << intel_crtc->plane,
 			  &obj->pending_flip.counter);
-
 	wake_up(&dev_priv->pending_flip_queue);
-	schedule_work(&work->work);
+
+	queue_work(dev_priv->wq, &work->work);
 
 	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
 }
@@ -6268,16 +7141,25 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
 		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
 	unsigned long flags;
 
+	/* NB: An MMIO update of the plane base pointer will also
+	 * generate a page-flip completion irq, i.e. every modeset
+	 * is also accompanied by a spurious intel_prepare_page_flip().
+	 */
 	spin_lock_irqsave(&dev->event_lock, flags);
-	if (intel_crtc->unpin_work) {
-		if ((++intel_crtc->unpin_work->pending) > 1)
-			DRM_ERROR("Prepared flip multiple times\n");
-	} else {
-		DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
-	}
+	if (intel_crtc->unpin_work)
+		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
+static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+{
+	/* Ensure that the work item is consistent when activating it ... */
+	smp_wmb();
+	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
+	/* and that it is marked active as soon as the irq could fire. */
+	smp_wmb();
+}
+
 static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
@@ -6311,6 +7193,8 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, fb->pitches[0]);
 	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
 	intel_ring_emit(ring, 0); /* aux display base address, unused */
+
+	intel_mark_page_flip_active(intel_crtc);
 	intel_ring_advance(ring);
 	return 0;
 
@@ -6351,6 +7235,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
 	intel_ring_emit(ring, MI_NOOP);
 
+	intel_mark_page_flip_active(intel_crtc);
 	intel_ring_advance(ring);
 	return 0;
 
@@ -6397,6 +7282,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
 	intel_ring_emit(ring, pf | pipesrc);
+
+	intel_mark_page_flip_active(intel_crtc);
 	intel_ring_advance(ring);
 	return 0;
 
@@ -6439,6 +7326,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
 	intel_ring_emit(ring, pf | pipesrc);
+
+	intel_mark_page_flip_active(intel_crtc);
 	intel_ring_advance(ring);
 	return 0;
 
@@ -6493,6 +7382,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
 	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
 	intel_ring_emit(ring, (MI_NOOP));
+
+	intel_mark_page_flip_active(intel_crtc);
 	intel_ring_advance(ring);
 	return 0;
 
@@ -6541,7 +7432,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		return -ENOMEM;
 
 	work->event = event;
-	work->dev = crtc->dev;
+	work->crtc = crtc;
 	intel_fb = to_intel_framebuffer(crtc->fb);
 	work->old_fb_obj = intel_fb->obj;
 	INIT_WORK(&work->work, intel_unpin_work_fn);
@@ -6566,6 +7457,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	intel_fb = to_intel_framebuffer(fb);
 	obj = intel_fb->obj;
 
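+	/* Throttle: with two unpin works already outstanding, flush the
+	 * workqueue so the old framebuffers are released before a third
+	 * flip is queued.
+	 */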
+	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
+		flush_workqueue(dev_priv->wq);
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		goto cleanup;
@@ -6584,6 +7478,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	 * the flip occurs and the object is no longer visible.
 	 */
 	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+	atomic_inc(&intel_crtc->unpin_work_count);
 
 	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
 	if (ret)
@@ -6598,6 +7493,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	return 0;
 
 cleanup_pending:
+	atomic_dec(&intel_crtc->unpin_work_count);
 	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 	drm_gem_object_unreference(&obj->base);
@@ -6893,7 +7789,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
 				dev->mode_config.dpms_property;
 
 			connector->dpms = DRM_MODE_DPMS_ON;
-			drm_connector_property_set_value(connector,
+			drm_object_property_set_value(&connector->base,
 							 dpms_property,
 							 DRM_MODE_DPMS_ON);
 
@@ -7015,8 +7911,6 @@ bool intel_set_mode(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
-	struct drm_encoder_helper_funcs *encoder_funcs;
-	struct drm_encoder *encoder;
 	struct intel_crtc *intel_crtc;
 	unsigned disable_pipes, prepare_pipes, modeset_pipes;
 	bool ret = true;
@@ -7061,6 +7955,9 @@ bool intel_set_mode(struct drm_crtc *crtc,
 	 * update the output configuration. */
 	intel_modeset_update_state(dev, prepare_pipes);
 
+	if (dev_priv->display.modeset_global_resources)
+		dev_priv->display.modeset_global_resources(dev);
+
 	/* Set up the DPLL and any encoders state that needs to adjust or depend
 	 * on the DPLL.
 	 */
@@ -7070,18 +7967,6 @@ bool intel_set_mode(struct drm_crtc *crtc,
 					   x, y, fb);
 		if (!ret)
 		    goto done;
-
-		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-
-			if (encoder->crtc != &intel_crtc->base)
-				continue;
-
-			DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
-				encoder->base.id, drm_get_encoder_name(encoder),
-				mode->base.id, mode->name);
-			encoder_funcs = encoder->helper_private;
-			encoder_funcs->mode_set(encoder, mode, adjusted_mode);
-		}
 	}
 
 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -7420,6 +8305,12 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
 	.page_flip = intel_crtc_page_flip,
 };
 
+static void intel_cpu_pll_init(struct drm_device *dev)
+{
+	if (IS_HASWELL(dev))
+		intel_ddi_pll_init(dev);
+}
+
 static void intel_pch_pll_init(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -7459,6 +8350,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 	/* Swap pipes & planes for FBC on pre-965 */
 	intel_crtc->pipe = pipe;
 	intel_crtc->plane = pipe;
+	intel_crtc->cpu_transcoder = pipe;
 	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
 		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
 		intel_crtc->plane = !pipe;
@@ -7551,17 +8443,9 @@ static void intel_setup_outputs(struct drm_device *dev)
 		I915_WRITE(PFIT_CONTROL, 0);
 	}
 
-	if (HAS_PCH_SPLIT(dev)) {
-		dpd_is_edp = intel_dpd_is_edp(dev);
-
-		if (has_edp_a(dev))
-			intel_dp_init(dev, DP_A, PORT_A);
-
-		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
-			intel_dp_init(dev, PCH_DP_D, PORT_D);
-	}
-
-	intel_crt_init(dev);
+	if (!(IS_HASWELL(dev) &&
+	      (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
+		intel_crt_init(dev);
 
 	if (IS_HASWELL(dev)) {
 		int found;
@@ -7584,6 +8468,10 @@ static void intel_setup_outputs(struct drm_device *dev)
 			intel_ddi_init(dev, PORT_D);
 	} else if (HAS_PCH_SPLIT(dev)) {
 		int found;
+		dpd_is_edp = intel_dpd_is_edp(dev);
+
+		if (has_edp_a(dev))
+			intel_dp_init(dev, DP_A, PORT_A);
 
 		if (I915_READ(HDMIB) & PORT_DETECTED) {
 			/* PCH SDVOB multiplex with HDMIB */
@@ -7603,11 +8491,15 @@ static void intel_setup_outputs(struct drm_device *dev)
 		if (I915_READ(PCH_DP_C) & DP_DETECTED)
 			intel_dp_init(dev, PCH_DP_C, PORT_C);
 
-		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
+		if (I915_READ(PCH_DP_D) & DP_DETECTED)
 			intel_dp_init(dev, PCH_DP_D, PORT_D);
 	} else if (IS_VALLEYVIEW(dev)) {
 		int found;
 
+		/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
+		if (I915_READ(DP_C) & DP_DETECTED)
+			intel_dp_init(dev, DP_C, PORT_C);
+
 		if (I915_READ(SDVOB) & PORT_DETECTED) {
 			/* SDVOB multiplex with HDMIB */
 			found = intel_sdvo_init(dev, SDVOB, true);
@@ -7620,9 +8512,6 @@ static void intel_setup_outputs(struct drm_device *dev)
 		if (I915_READ(SDVOC) & PORT_DETECTED)
 			intel_hdmi_init(dev, SDVOC, PORT_C);
 
-		/* Shares lanes with HDMI on SDVOC */
-		if (I915_READ(DP_C) & DP_DETECTED)
-			intel_dp_init(dev, DP_C, PORT_C);
 	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
 		bool found = false;
 
@@ -7676,8 +8565,9 @@ static void intel_setup_outputs(struct drm_device *dev)
 			intel_encoder_clones(encoder);
 	}
 
-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-		ironlake_init_pch_refclk(dev);
+	intel_init_pch_refclk(dev);
+
+	drm_helper_move_panel_connectors_to_head(dev);
 }
 
 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -7718,27 +8608,51 @@ int intel_framebuffer_init(struct drm_device *dev,
 	if (mode_cmd->pitches[0] & 63)
 		return -EINVAL;
 
+	/* FIXME: <= Gen4 stride limits are a bit unclear */
+	if (mode_cmd->pitches[0] > 32768)
+		return -EINVAL;
+
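+	/* A tiled object's fence stride must match the framebuffer pitch. */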
+	if (obj->tiling_mode != I915_TILING_NONE &&
+	    mode_cmd->pitches[0] != obj->stride)
+		return -EINVAL;
+
+	/* Reject formats not supported by any plane early. */
 	switch (mode_cmd->pixel_format) {
-	case DRM_FORMAT_RGB332:
+	case DRM_FORMAT_C8:
 	case DRM_FORMAT_RGB565:
 	case DRM_FORMAT_XRGB8888:
-	case DRM_FORMAT_XBGR8888:
 	case DRM_FORMAT_ARGB8888:
+		break;
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_ARGB1555:
+		if (INTEL_INFO(dev)->gen > 3)
+			return -EINVAL;
+		break;
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
 	case DRM_FORMAT_XRGB2101010:
 	case DRM_FORMAT_ARGB2101010:
-		/* RGB formats are common across chipsets */
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ABGR2101010:
+		if (INTEL_INFO(dev)->gen < 4)
+			return -EINVAL;
 		break;
 	case DRM_FORMAT_YUYV:
 	case DRM_FORMAT_UYVY:
 	case DRM_FORMAT_YVYU:
 	case DRM_FORMAT_VYUY:
+		if (INTEL_INFO(dev)->gen < 6)
+			return -EINVAL;
 		break;
 	default:
-		DRM_DEBUG_KMS("unsupported pixel format %u\n",
-				mode_cmd->pixel_format);
+		DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
 		return -EINVAL;
 	}
 
+	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
+	if (mode_cmd->offsets[0] != 0)
+		return -EINVAL;
+
 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
 	if (ret) {
 		DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -7776,7 +8690,13 @@ static void intel_init_display(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* We always want a DPMS function */
-	if (HAS_PCH_SPLIT(dev)) {
+	if (IS_HASWELL(dev)) {
+		dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
+		dev_priv->display.crtc_enable = haswell_crtc_enable;
+		dev_priv->display.crtc_disable = haswell_crtc_disable;
+		dev_priv->display.off = haswell_crtc_off;
+		dev_priv->display.update_plane = ironlake_update_plane;
+	} else if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
 		dev_priv->display.crtc_enable = ironlake_crtc_enable;
 		dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -7827,6 +8747,8 @@ static void intel_init_display(struct drm_device *dev)
 			/* FIXME: detect B0+ stepping and use auto training */
 			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
 			dev_priv->display.write_eld = ironlake_write_eld;
+			dev_priv->display.modeset_global_resources =
+				ivb_modeset_global_resources;
 		} else if (IS_HASWELL(dev)) {
 			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
 			dev_priv->display.write_eld = haswell_write_eld;
@@ -8058,6 +8980,7 @@ void intel_modeset_init(struct drm_device *dev)
 			DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
 	}
 
+	intel_cpu_pll_init(dev);
 	intel_pch_pll_init(dev);
 
 	/* Just disable it once at startup */
@@ -8127,7 +9050,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 	u32 reg;
 
 	/* Clear any frame start delays used for debugging left by the BIOS */
-	reg = PIPECONF(crtc->pipe);
+	reg = PIPECONF(crtc->cpu_transcoder);
 	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
 
 	/* We need to sanitize the plane -> pipe mapping first because this will
@@ -8246,7 +9169,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 
 /* Scan out the current hw modeset state, sanitize it and map it into the drm
  * and i915 state tracking structures. */
-void intel_modeset_setup_hw_state(struct drm_device *dev)
+void intel_modeset_setup_hw_state(struct drm_device *dev,
+				  bool force_restore)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum pipe pipe;
@@ -8255,10 +9179,35 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
 	struct intel_encoder *encoder;
 	struct intel_connector *connector;
 
+	if (IS_HASWELL(dev)) {
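+		/* Work out which pipe, if any, the hardware has left driving
+		 * the eDP transcoder, so cpu_transcoder matches the real
+		 * state.
+		 */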
+		tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+
+		if (tmp & TRANS_DDI_FUNC_ENABLE) {
+			switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+			case TRANS_DDI_EDP_INPUT_A_ON:
+			case TRANS_DDI_EDP_INPUT_A_ONOFF:
+				pipe = PIPE_A;
+				break;
+			case TRANS_DDI_EDP_INPUT_B_ONOFF:
+				pipe = PIPE_B;
+				break;
+			case TRANS_DDI_EDP_INPUT_C_ONOFF:
+				pipe = PIPE_C;
+				break;
+			}
+
+			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+			crtc->cpu_transcoder = TRANSCODER_EDP;
+
+			DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
+				      pipe_name(pipe));
+		}
+	}
+
 	for_each_pipe(pipe) {
 		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
-		tmp = I915_READ(PIPECONF(pipe));
+		tmp = I915_READ(PIPECONF(crtc->cpu_transcoder));
 		if (tmp & PIPECONF_ENABLE)
 			crtc->active = true;
 		else
@@ -8271,6 +9220,9 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
 			      crtc->active ? "enabled" : "disabled");
 	}
 
+	if (IS_HASWELL(dev))
+		intel_ddi_setup_hw_pll_state(dev);
+
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
 			    base.head) {
 		pipe = 0;
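From this point on, pipe-timing registers are indexed by the new cpu_transcoder field rather than by the pipe itself, since on Haswell the eDP transcoder has no fixed pipe. A sketch of the mapping (hypothetical condensation; the driver reads it off the crtc exactly as the hunk above does):

static enum transcoder
sketch_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	struct intel_crtc *crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

	/* TRANSCODER_A..C normally, TRANSCODER_EDP when the pipe was
	 * routed to the dedicated eDP transcoder above. */
	return crtc->cpu_transcoder;
}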
@@ -8317,9 +9269,19 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
 		intel_sanitize_crtc(crtc);
 	}
 
-	intel_modeset_update_staged_output_state(dev);
+	if (force_restore) {
+		for_each_pipe(pipe) {
+			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+			intel_set_mode(&crtc->base, &crtc->base.mode,
+				       crtc->base.x, crtc->base.y, crtc->base.fb);
+		}
+	} else {
+		intel_modeset_update_staged_output_state(dev);
+	}
 
 	intel_modeset_check_state(dev);
+
+	drm_mode_config_reset(dev);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -8328,7 +9290,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
 
 	intel_setup_overlay(dev);
 
-	intel_modeset_setup_hw_state(dev);
+	intel_modeset_setup_hw_state(dev, false);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)
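The new boolean splits read-out-only callers from those that want the collected state pushed back onto the hardware; the hunk above has the GEM-init path pass false, while the resume path is the natural force_restore=true caller in this series (an assumption about the rest of the series, shown only as illustration):

intel_modeset_setup_hw_state(dev, false);	/* driver load: adopt what the BIOS set up */
intel_modeset_setup_hw_state(dev, true);	/* resume: replay each crtc's saved mode */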
@@ -8447,6 +9409,7 @@ intel_display_capture_error_state(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_display_error_state *error;
+	enum transcoder cpu_transcoder;
 	int i;
 
 	error = kmalloc(sizeof(*error), GFP_ATOMIC);
@@ -8454,6 +9417,8 @@ intel_display_capture_error_state(struct drm_device *dev)
 		return NULL;
 
 	for_each_pipe(i) {
+		cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
+
 		error->cursor[i].control = I915_READ(CURCNTR(i));
 		error->cursor[i].position = I915_READ(CURPOS(i));
 		error->cursor[i].base = I915_READ(CURBASE(i));
@@ -8468,14 +9433,14 @@ intel_display_capture_error_state(struct drm_device *dev)
 			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
 		}
 
-		error->pipe[i].conf = I915_READ(PIPECONF(i));
+		error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
 		error->pipe[i].source = I915_READ(PIPESRC(i));
-		error->pipe[i].htotal = I915_READ(HTOTAL(i));
-		error->pipe[i].hblank = I915_READ(HBLANK(i));
-		error->pipe[i].hsync = I915_READ(HSYNC(i));
-		error->pipe[i].vtotal = I915_READ(VTOTAL(i));
-		error->pipe[i].vblank = I915_READ(VBLANK(i));
-		error->pipe[i].vsync = I915_READ(VSYNC(i));
+		error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+		error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+		error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+		error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+		error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+		error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
 	}
 
 	return error;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 368ed8ef1600..1b63d55318a0 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -36,8 +36,6 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-#define DP_RECEIVER_CAP_SIZE	0xf
-#define DP_LINK_STATUS_SIZE	6
 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
 
 /**
@@ -49,7 +47,9 @@
  */
 static bool is_edp(struct intel_dp *intel_dp)
 {
-	return intel_dp->base.type == INTEL_OUTPUT_EDP;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
 }
 
 /**
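The two-step walk in is_edp() above relies on the container type this series introduces; a condensed sketch of its layout (illustrative only; the real definition, with more members, lands in intel_drv.h):

struct intel_digital_port {
	struct intel_encoder base;	/* the encoder object itself */
	enum port port;			/* PORT_A..PORT_D */
	struct intel_dp dp;		/* DP state, now embedded */
};

/* dp_to_dig_port() and enc_to_dig_port() are then plain container_of()
 * conversions between the embedded members and the container. */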
@@ -76,15 +76,16 @@ static bool is_cpu_edp(struct intel_dp *intel_dp)
 	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
 }
 
-static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
 {
-	return container_of(encoder, struct intel_dp, base.base);
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+	return intel_dig_port->base.base.dev;
 }
 
 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
 {
-	return container_of(intel_attached_encoder(connector),
-			    struct intel_dp, base);
+	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
 }
 
 /**
@@ -106,48 +107,31 @@ bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
 	return is_pch_edp(intel_dp);
 }
 
-static void intel_dp_start_link_train(struct intel_dp *intel_dp);
-static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
 static void intel_dp_link_down(struct intel_dp *intel_dp);
 
 void
 intel_edp_link_config(struct intel_encoder *intel_encoder,
 		       int *lane_num, int *link_bw)
 {
-	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
 	*lane_num = intel_dp->lane_count;
-	if (intel_dp->link_bw == DP_LINK_BW_1_62)
-		*link_bw = 162000;
-	else if (intel_dp->link_bw == DP_LINK_BW_2_7)
-		*link_bw = 270000;
+	*link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
 }
 
 int
 intel_edp_target_clock(struct intel_encoder *intel_encoder,
 		       struct drm_display_mode *mode)
 {
-	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+	struct intel_connector *intel_connector = intel_dp->attached_connector;
 
-	if (intel_dp->panel_fixed_mode)
-		return intel_dp->panel_fixed_mode->clock;
+	if (intel_connector->panel.fixed_mode)
+		return intel_connector->panel.fixed_mode->clock;
 	else
 		return mode->clock;
 }
 
-static int
-intel_dp_max_lane_count(struct intel_dp *intel_dp)
-{
-	int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
-	switch (max_lane_count) {
-	case 1: case 2: case 4:
-		break;
-	default:
-		max_lane_count = 4;
-	}
-	return max_lane_count;
-}
-
 static int
 intel_dp_max_link_bw(struct intel_dp *intel_dp)
 {
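drm_dp_bw_code_to_link_rate() collapses the removed if/else because the DP bandwidth code is just the per-lane link rate in 27 MHz units; a hypothetical stand-in to make the mapping explicit:

static int sketch_bw_code_to_link_rate(uint8_t link_bw)
{
	/* 0x06 -> 162000 kHz, 0x0a -> 270000 kHz, matching the two
	 * branches that used to be open-coded. */
	return link_bw * 27000;
}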
@@ -208,7 +192,7 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp,
 			  bool adjust_mode)
 {
 	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
-	int max_lanes = intel_dp_max_lane_count(intel_dp);
+	int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
 	int max_rate, mode_rate;
 
 	mode_rate = intel_dp_link_required(mode->clock, 24);
@@ -234,12 +218,14 @@ intel_dp_mode_valid(struct drm_connector *connector,
 		    struct drm_display_mode *mode)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 
-	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
-		if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
+	if (is_edp(intel_dp) && fixed_mode) {
+		if (mode->hdisplay > fixed_mode->hdisplay)
 			return MODE_PANEL;
 
-		if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
+		if (mode->vdisplay > fixed_mode->vdisplay)
 			return MODE_PANEL;
 	}
 
@@ -285,6 +271,10 @@ intel_hrawclk(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t clkcfg;
 
+	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
+	if (IS_VALLEYVIEW(dev))
+		return 200;
+
 	clkcfg = I915_READ(CLKCFG);
 	switch (clkcfg & CLKCFG_FSB_MASK) {
 	case CLKCFG_FSB_400:
@@ -310,7 +300,7 @@ intel_hrawclk(struct drm_device *dev)
 
 static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
@@ -318,7 +308,7 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
 
 static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
@@ -327,7 +317,7 @@ static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
 static void
 intel_dp_check_edp(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (!is_edp(intel_dp))
@@ -346,7 +336,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 		uint8_t *recv, int recv_size)
 {
 	uint32_t output_reg = intel_dp->output_reg;
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t ch_ctl = output_reg + 0x10;
 	uint32_t ch_data = ch_ctl + 4;
@@ -356,6 +347,29 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 	uint32_t aux_clock_divider;
 	int try, precharge;
 
+	if (IS_HASWELL(dev)) {
+		switch (intel_dig_port->port) {
+		case PORT_A:
+			ch_ctl = DPA_AUX_CH_CTL;
+			ch_data = DPA_AUX_CH_DATA1;
+			break;
+		case PORT_B:
+			ch_ctl = PCH_DPB_AUX_CH_CTL;
+			ch_data = PCH_DPB_AUX_CH_DATA1;
+			break;
+		case PORT_C:
+			ch_ctl = PCH_DPC_AUX_CH_CTL;
+			ch_data = PCH_DPC_AUX_CH_DATA1;
+			break;
+		case PORT_D:
+			ch_ctl = PCH_DPD_AUX_CH_CTL;
+			ch_data = PCH_DPD_AUX_CH_DATA1;
+			break;
+		default:
+			BUG();
+		}
+	}
+
 	intel_dp_check_edp(intel_dp);
 	/* The clock divider is based off the hrawclk,
 	 * and would like to run at 2MHz. So, take the
@@ -365,12 +379,16 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 	 * clock divider.
 	 */
 	if (is_cpu_edp(intel_dp)) {
-		if (IS_GEN6(dev) || IS_GEN7(dev))
+		if (IS_HASWELL(dev))
+			aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
+		else if (IS_VALLEYVIEW(dev))
+			aux_clock_divider = 100;
+		else if (IS_GEN6(dev) || IS_GEN7(dev))
 			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400MHz */
 		else
 			aux_clock_divider = 225; /* eDP input clock at 450MHz */
 	} else if (HAS_PCH_SPLIT(dev))
-		aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */
+		aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
 	else
 		aux_clock_divider = intel_hrawclk(dev) / 2;
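Every branch above is the same computation, source clock over the roughly 2 MHz the AUX channel wants to run at; a hypothetical helper with the numbers worked out:

static uint32_t sketch_aux_clock_divider(uint32_t src_khz)
{
	/* 450000 -> 225, 400000 (SNB/IVB) -> 200, and the 125 MHz PCH
	 * rawclk rounds up to 63, the constant the old code hardcoded. */
	return DIV_ROUND_UP(src_khz, 2000);
}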
 
@@ -642,9 +660,6 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 	return -EREMOTEIO;
 }
 
-static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
-static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-
 static int
 intel_dp_i2c_init(struct intel_dp *intel_dp,
 		  struct intel_connector *intel_connector, const char *name)
@@ -670,22 +685,25 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
 	return ret;
 }
 
-static bool
+bool
 intel_dp_mode_fixup(struct drm_encoder *encoder,
 		    const struct drm_display_mode *mode,
 		    struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct intel_connector *intel_connector = intel_dp->attached_connector;
 	int lane_count, clock;
-	int max_lane_count = intel_dp_max_lane_count(intel_dp);
+	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
 	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
 	int bpp, mode_rate;
 	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
-	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
-		intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
-		intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
+	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+				       adjusted_mode);
+		intel_pch_panel_fitting(dev,
+					intel_connector->panel.fitting_mode,
 					mode, adjusted_mode);
 	}
 
@@ -762,21 +780,23 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 		 struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = crtc->dev;
-	struct intel_encoder *encoder;
+	struct intel_encoder *intel_encoder;
+	struct intel_dp *intel_dp;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int lane_count = 4;
 	struct intel_dp_m_n m_n;
 	int pipe = intel_crtc->pipe;
+	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
 
 	/*
 	 * Find the lane count in the intel_encoder private
 	 */
-	for_each_encoder_on_crtc(dev, crtc, encoder) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+		intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
-		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
-		    intel_dp->base.type == INTEL_OUTPUT_EDP)
+		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+		    intel_encoder->type == INTEL_OUTPUT_EDP)
 		{
 			lane_count = intel_dp->lane_count;
 			break;
@@ -791,23 +811,46 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 	intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
 			     mode->clock, adjusted_mode->clock, &m_n);
 
-	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(TRANSDATA_M1(pipe),
-			   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
-			   m_n.gmch_m);
+	if (IS_HASWELL(dev)) {
+		I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
+			   TU_SIZE(m_n.tu) | m_n.gmch_m);
+		I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
+		I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
+		I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
+	} else if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
 		I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
 		I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
 		I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
+	} else if (IS_VALLEYVIEW(dev)) {
+		I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
+		I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
+		I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
+		I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
 	} else {
 		I915_WRITE(PIPE_GMCH_DATA_M(pipe),
-			   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
-			   m_n.gmch_m);
+			   TU_SIZE(m_n.tu) | m_n.gmch_m);
 		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
 		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
 		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
 	}
 }
 
+void intel_dp_init_link_config(struct intel_dp *intel_dp)
+{
+	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+	intel_dp->link_configuration[0] = intel_dp->link_bw;
+	intel_dp->link_configuration[1] = intel_dp->lane_count;
+	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
+	/*
+	 * Check for DPCD version 1.1 or later and enhanced framing support
+	 */
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
+		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+	}
+}
+
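The buffer filled in above is later written to the sink starting at DPCD address 0x100 (DP_LINK_BW_SET), as the link-training hunk below shows, so the bytes land as follows for, say, a 2.7 GHz 4-lane sink that advertises enhanced framing (values assumed for illustration):

uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE] = {
	[0] = DP_LINK_BW_2_7,				/* 0x100 LINK_BW_SET */
	[1] = 4 | DP_LANE_COUNT_ENHANCED_FRAME_EN,	/* 0x101 LANE_COUNT_SET */
	[8] = DP_SET_ANSI_8B10B,			/* 0x108 MAIN_LINK_CHANNEL_CODING_SET */
};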
 static void
 intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		  struct drm_display_mode *adjusted_mode)
@@ -815,7 +858,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-	struct drm_crtc *crtc = intel_dp->base.base.crtc;
+	struct drm_crtc *crtc = encoder->crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
 	/*
@@ -860,21 +903,12 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
 		intel_write_eld(encoder, adjusted_mode);
 	}
-	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
-	intel_dp->link_configuration[0] = intel_dp->link_bw;
-	intel_dp->link_configuration[1] = intel_dp->lane_count;
-	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
-	/*
-	 * Check for DPCD version > 1.1 and enhanced framing support
-	 */
-	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
-	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
-		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-	}
+
+	intel_dp_init_link_config(intel_dp);
 
 	/* Split out the IBX/CPU vs CPT settings */
 
-	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 			intel_dp->DP |= DP_SYNC_HS_HIGH;
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -931,7 +965,7 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
 				       u32 mask,
 				       u32 value)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
@@ -978,9 +1012,9 @@ static  u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
 	return control;
 }
 
-static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp;
 
@@ -1019,7 +1053,7 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
 
 static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp;
 
@@ -1041,14 +1075,14 @@ static void ironlake_panel_vdd_work(struct work_struct *__work)
 {
 	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
 						 struct intel_dp, panel_vdd_work);
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
 	mutex_lock(&dev->mode_config.mutex);
 	ironlake_panel_vdd_off_sync(intel_dp);
 	mutex_unlock(&dev->mode_config.mutex);
 }
 
-static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 {
 	if (!is_edp(intel_dp))
 		return;
@@ -1071,9 +1105,9 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 	}
 }
 
-static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
+void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp;
 
@@ -1113,9 +1147,9 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 	}
 }
 
-static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp;
 
@@ -1138,10 +1172,12 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 	ironlake_wait_panel_off(intel_dp);
 }
 
-static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
+void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
 	u32 pp;
 
 	if (!is_edp(intel_dp))
@@ -1159,17 +1195,21 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
 	pp |= EDP_BLC_ENABLE;
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
+
+	intel_panel_enable_backlight(dev, pipe);
 }
 
-static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
+void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp;
 
 	if (!is_edp(intel_dp))
 		return;
 
+	intel_panel_disable_backlight(dev);
+
 	DRM_DEBUG_KMS("\n");
 	pp = ironlake_get_pp_control(dev_priv);
 	pp &= ~EDP_BLC_ENABLE;
@@ -1180,8 +1220,9 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
 
 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
-	struct drm_crtc *crtc = intel_dp->base.base.crtc;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpa_ctl;
 
@@ -1205,8 +1246,9 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
 
 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
-	struct drm_crtc *crtc = intel_dp->base.base.crtc;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpa_ctl;
 
@@ -1228,7 +1270,7 @@ static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
 }
 
 /* If the sink supports it, try to set the power state appropriately */
-static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
 {
 	int ret, i;
 
@@ -1298,9 +1340,10 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
 				return true;
 			}
 		}
-	}
 
-	DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);
+		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
+			      intel_dp->output_reg);
+	}
 
 	return true;
 }
@@ -1396,38 +1439,6 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
 					      DP_LINK_STATUS_SIZE);
 }
 
-static uint8_t
-intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
-		     int r)
-{
-	return link_status[r - DP_LANE0_1_STATUS];
-}
-
-static uint8_t
-intel_get_adjust_request_voltage(uint8_t adjust_request[2],
-				 int lane)
-{
-	int	    s = ((lane & 1) ?
-			 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
-			 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
-	uint8_t l = adjust_request[lane>>1];
-
-	return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
-}
-
-static uint8_t
-intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
-				      int lane)
-{
-	int	    s = ((lane & 1) ?
-			 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
-			 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
-	uint8_t l = adjust_request[lane>>1];
-
-	return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
-}
-
-
 #if 0
 static char	*voltage_names[] = {
 	"0.4V", "0.6V", "0.8V", "1.2V"
@@ -1448,7 +1459,7 @@ static char	*link_train_names[] = {
 static uint8_t
 intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
 	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
 		return DP_TRAIN_VOLTAGE_SWING_800;
@@ -1461,9 +1472,21 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
 static uint8_t
 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
-	if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+	if (IS_HASWELL(dev)) {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			return DP_TRAIN_PRE_EMPHASIS_9_5;
+		case DP_TRAIN_VOLTAGE_SWING_600:
+			return DP_TRAIN_PRE_EMPHASIS_6;
+		case DP_TRAIN_VOLTAGE_SWING_800:
+			return DP_TRAIN_PRE_EMPHASIS_3_5;
+		case DP_TRAIN_VOLTAGE_SWING_1200:
+		default:
+			return DP_TRAIN_PRE_EMPHASIS_0;
+		}
+	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
 		case DP_TRAIN_VOLTAGE_SWING_400:
 			return DP_TRAIN_PRE_EMPHASIS_6;
@@ -1494,13 +1517,12 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
 	uint8_t v = 0;
 	uint8_t p = 0;
 	int lane;
-	uint8_t	*adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
 	uint8_t voltage_max;
 	uint8_t preemph_max;
 
 	for (lane = 0; lane < intel_dp->lane_count; lane++) {
-		uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
-		uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
+		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
 
 		if (this_v > v)
 			v = this_v;
@@ -1617,52 +1639,38 @@ intel_gen7_edp_signal_levels(uint8_t train_set)
 	}
 }
 
-static uint8_t
-intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
-		      int lane)
+/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_dp_signal_levels_hsw(uint8_t train_set)
 {
-	int s = (lane & 1) * 4;
-	uint8_t l = link_status[lane>>1];
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	switch (signal_levels) {
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+		return DDI_BUF_EMP_400MV_0DB_HSW;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return DDI_BUF_EMP_400MV_3_5DB_HSW;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+		return DDI_BUF_EMP_400MV_6DB_HSW;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
+		return DDI_BUF_EMP_400MV_9_5DB_HSW;
 
-	return (l >> s) & 0xf;
-}
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+		return DDI_BUF_EMP_600MV_0DB_HSW;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return DDI_BUF_EMP_600MV_3_5DB_HSW;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+		return DDI_BUF_EMP_600MV_6DB_HSW;
 
-/* Check for clock recovery is done on all channels */
-static bool
-intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
-{
-	int lane;
-	uint8_t lane_status;
-
-	for (lane = 0; lane < lane_count; lane++) {
-		lane_status = intel_get_lane_status(link_status, lane);
-		if ((lane_status & DP_LANE_CR_DONE) == 0)
-			return false;
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+		return DDI_BUF_EMP_800MV_0DB_HSW;
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return DDI_BUF_EMP_800MV_3_5DB_HSW;
+	default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+			      "0x%x\n", signal_levels);
+		return DDI_BUF_EMP_400MV_0DB_HSW;
 	}
-	return true;
-}
-
-/* Check to see if channel eq is done on all channels */
-#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
-			 DP_LANE_CHANNEL_EQ_DONE|\
-			 DP_LANE_SYMBOL_LOCKED)
-static bool
-intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
-{
-	uint8_t lane_align;
-	uint8_t lane_status;
-	int lane;
-
-	lane_align = intel_dp_link_status(link_status,
-					  DP_LANE_ALIGN_STATUS_UPDATED);
-	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
-		return false;
-	for (lane = 0; lane < intel_dp->lane_count; lane++) {
-		lane_status = intel_get_lane_status(link_status, lane);
-		if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
-			return false;
-	}
-	return true;
 }
 
 static bool
@@ -1670,11 +1678,49 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
 			uint32_t dp_reg_value,
 			uint8_t dp_train_pat)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_dig_port->port;
 	int ret;
+	uint32_t temp;
 
-	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+	if (IS_HASWELL(dev)) {
+		temp = I915_READ(DP_TP_CTL(port));
+
+		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
+			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
+		else
+			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
+
+		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+		case DP_TRAINING_PATTERN_DISABLE:
+			temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
+			I915_WRITE(DP_TP_CTL(port), temp);
+
+			if (wait_for((I915_READ(DP_TP_STATUS(port)) &
+				      DP_TP_STATUS_IDLE_DONE), 1))
+				DRM_ERROR("Timed out waiting for DP idle patterns\n");
+
+			temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+
+			break;
+		case DP_TRAINING_PATTERN_1:
+			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
+			break;
+		case DP_TRAINING_PATTERN_3:
+			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
+			break;
+		}
+		I915_WRITE(DP_TP_CTL(port), temp);
+
+	} else if (HAS_PCH_CPT(dev) &&
+		   (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
 		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
 
 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
@@ -1734,16 +1780,20 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
 }
 
 /* Enable corresponding port and start training pattern 1 */
-static void
+void
 intel_dp_start_link_train(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
+	struct drm_device *dev = encoder->dev;
 	int i;
 	uint8_t voltage;
 	bool clock_recovery = false;
 	int voltage_tries, loop_tries;
 	uint32_t DP = intel_dp->DP;
 
+	if (IS_HASWELL(dev))
+		intel_ddi_prepare_link_retrain(encoder);
+
 	/* Write the link configuration data */
 	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
 				  intel_dp->link_configuration,
@@ -1761,8 +1811,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
 		uint32_t    signal_levels;
 
-
-		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+		if (IS_HASWELL(dev)) {
+			signal_levels = intel_dp_signal_levels_hsw(
+							intel_dp->train_set[0]);
+			DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
+		} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
 			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
 		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
@@ -1770,23 +1823,24 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
 		} else {
 			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
-			DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
+		DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
+			      signal_levels);
 
+		/* Set training pattern 1 */
 		if (!intel_dp_set_link_train(intel_dp, DP,
 					     DP_TRAINING_PATTERN_1 |
 					     DP_LINK_SCRAMBLING_DISABLE))
 			break;
-		/* Set training pattern 1 */
 
-		udelay(100);
+		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
 			DRM_ERROR("failed to get link status\n");
 			break;
 		}
 
-		if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
 			DRM_DEBUG_KMS("clock recovery OK\n");
 			clock_recovery = true;
 			break;
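The fixed udelay(100) becomes a DPCD-driven wait: drm_dp_link_train_clock_recovery_delay() honors TRAINING_AUX_RD_INTERVAL (DPCD 0x0e) and only falls back to the 100 us spec minimum when the sink leaves it zero. A sketch of that behavior (assumed shape of the helper, shown for clarity):

static void sketch_clock_recovery_delay(const uint8_t dpcd[])
{
	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
		udelay(100);	/* spec minimum for clock recovery */
	else
		mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
}

The udelay(400) in the channel-EQ loop below is replaced the same way.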
@@ -1825,10 +1879,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 	intel_dp->DP = DP;
 }
 
-static void
+void
 intel_dp_complete_link_train(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	bool channel_eq = false;
 	int tries, cr_tries;
 	uint32_t DP = intel_dp->DP;
@@ -1848,7 +1902,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 			break;
 		}
 
-		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+		if (IS_HASWELL(dev)) {
+			signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
+			DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
+		} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
 			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
 		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
@@ -1865,18 +1922,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 					     DP_LINK_SCRAMBLING_DISABLE))
 			break;
 
-		udelay(400);
+		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
 		if (!intel_dp_get_link_status(intel_dp, link_status))
 			break;
 
 		/* Make sure clock is still ok */
-		if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
 			intel_dp_start_link_train(intel_dp);
 			cr_tries++;
 			continue;
 		}
 
-		if (intel_channel_eq_ok(intel_dp, link_status)) {
+		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
 			channel_eq = true;
 			break;
 		}
@@ -1895,16 +1952,38 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 		++tries;
 	}
 
+	if (channel_eq)
+		DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n");
+
 	intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
 }
 
 static void
 intel_dp_link_down(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t DP = intel_dp->DP;
 
+	/*
+	 * The DDI code has a strict mode set sequence and we should try to
+	 * respect it, otherwise we might hang the machine in many different
+	 * ways. So we really should only be disabling the port as part of a
+	 * complete crtc_disable sequence. This function is called in just two
+	 * situations in the DDI code:
+	 * - Link training failed during crtc_enable; in this case we really
+	 *   should respect the mode set sequence and wait for a crtc_disable.
+	 * - Someone turned the monitor off and intel_dp_check_link_status
+	 *   called us. We don't need to disable the whole port in this case,
+	 *   so when someone turns the monitor on again,
+	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
+	 *   training.
+	 */
+	if (IS_HASWELL(dev))
+		return;
+
 	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
 		return;
 
@@ -1923,7 +2002,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 
 	if (HAS_PCH_IBX(dev) &&
 	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
-		struct drm_crtc *crtc = intel_dp->base.base.crtc;
+		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
 
 		/* Hardware workaround: leaving our transcoder select
 		 * set to transcoder B while it's off will prevent the
@@ -2024,7 +2103,7 @@ static void
 intel_dp_handle_test_request(struct intel_dp *intel_dp)
 {
 	/* NAK by default */
-	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK);
+	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
 }
 
 /*
@@ -2036,16 +2115,17 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
  *  4. Check link status on receipt of hot-plug interrupt
  */
 
-static void
+void
 intel_dp_check_link_status(struct intel_dp *intel_dp)
 {
+	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
 	u8 sink_irq_vector;
 	u8 link_status[DP_LINK_STATUS_SIZE];
 
-	if (!intel_dp->base.connectors_active)
+	if (!intel_encoder->connectors_active)
 		return;
 
-	if (WARN_ON(!intel_dp->base.base.crtc))
+	if (WARN_ON(!intel_encoder->base.crtc))
 		return;
 
 	/* Try to read receiver status if the link appears to be up */
@@ -2074,9 +2154,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
 	}
 
-	if (!intel_channel_eq_ok(intel_dp, link_status)) {
+	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
 		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
-			      drm_get_encoder_name(&intel_dp->base.base));
+			      drm_get_encoder_name(&intel_encoder->base));
 		intel_dp_start_link_train(intel_dp);
 		intel_dp_complete_link_train(intel_dp);
 	}
@@ -2125,11 +2205,12 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
 static enum drm_connector_status
 ironlake_dp_detect(struct intel_dp *intel_dp)
 {
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	enum drm_connector_status status;
 
 	/* Can't disconnect eDP, but you can close the lid... */
 	if (is_edp(intel_dp)) {
-		status = intel_panel_detect(intel_dp->base.base.dev);
+		status = intel_panel_detect(dev);
 		if (status == connector_status_unknown)
 			status = connector_status_connected;
 		return status;
@@ -2141,7 +2222,7 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
 static enum drm_connector_status
 g4x_dp_detect(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t bit;
 
@@ -2168,44 +2249,45 @@ g4x_dp_detect(struct intel_dp *intel_dp)
 static struct edid *
 intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
-	struct intel_dp *intel_dp = intel_attached_dp(connector);
-	struct edid	*edid;
-	int size;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
 
-	if (is_edp(intel_dp)) {
-		if (!intel_dp->edid)
+	/* use cached edid if we have one */
+	if (intel_connector->edid) {
+		struct edid *edid;
+		int size;
+
+		/* invalid edid */
+		if (IS_ERR(intel_connector->edid))
 			return NULL;
 
-		size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
+		size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
 		edid = kmalloc(size, GFP_KERNEL);
 		if (!edid)
 			return NULL;
 
-		memcpy(edid, intel_dp->edid, size);
+		memcpy(edid, intel_connector->edid, size);
 		return edid;
 	}
 
-	edid = drm_get_edid(connector, adapter);
-	return edid;
+	return drm_get_edid(connector, adapter);
 }
 
 static int
 intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
-	struct intel_dp *intel_dp = intel_attached_dp(connector);
-	int	ret;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
 
-	if (is_edp(intel_dp)) {
-		drm_mode_connector_update_edid_property(connector,
-							intel_dp->edid);
-		ret = drm_add_edid_modes(connector, intel_dp->edid);
-		drm_edid_to_eld(connector,
-				intel_dp->edid);
-		return intel_dp->edid_mode_count;
+	/* use cached edid if we have one */
+	if (intel_connector->edid) {
+		/* invalid edid */
+		if (IS_ERR(intel_connector->edid))
+			return 0;
+
+		return intel_connector_update_modes(connector,
+						    intel_connector->edid);
 	}
 
-	ret = intel_ddc_get_modes(connector, adapter);
-	return ret;
+	return intel_ddc_get_modes(connector, adapter);
 }
 
 
@@ -2219,9 +2301,12 @@ static enum drm_connector_status
 intel_dp_detect(struct drm_connector *connector, bool force)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct drm_device *dev = connector->dev;
 	enum drm_connector_status status;
 	struct edid *edid = NULL;
+	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
 
 	intel_dp->has_audio = false;
 
@@ -2230,10 +2315,9 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 	else
 		status = g4x_dp_detect(intel_dp);
 
-	DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
-		      intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
-		      intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
-		      intel_dp->dpcd[6], intel_dp->dpcd[7]);
+	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
+			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
 
 	if (status != connector_status_connected)
 		return status;
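The dpcd_hex_dump buffer declared above is sized sizeof(dpcd) * 3 because hex_dump_to_buffer() with groupsize 1 emits two hex digits plus a separator per byte, with the final slot taken by the NUL. Roughly (the output bytes are invented for illustration):

char buf[DP_RECEIVER_CAP_SIZE * 3];

hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
		   32, 1, buf, sizeof(buf), false);
/* buf now reads e.g. "11 0a 84 01 00 00 01 00 02 00 00 00 00 00 00" */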
@@ -2250,49 +2334,31 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 		}
 	}
 
+	if (intel_encoder->type != INTEL_OUTPUT_EDP)
+		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
 	return connector_status_connected;
 }
 
 static int intel_dp_get_modes(struct drm_connector *connector)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
-	struct drm_device *dev = intel_dp->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_device *dev = connector->dev;
 	int ret;
 
 	/* We should parse the EDID data and find out if it has an audio sink
 	 */
 
 	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
-	if (ret) {
-		if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
-			struct drm_display_mode *newmode;
-			list_for_each_entry(newmode, &connector->probed_modes,
-					    head) {
-				if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
-					intel_dp->panel_fixed_mode =
-						drm_mode_duplicate(dev, newmode);
-					break;
-				}
-			}
-		}
+	if (ret)
 		return ret;
-	}
 
-	/* if eDP has no EDID, try to use fixed panel mode from VBT */
-	if (is_edp(intel_dp)) {
-		/* initialize panel mode from VBT if available for eDP */
-		if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
-			intel_dp->panel_fixed_mode =
-				drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
-			if (intel_dp->panel_fixed_mode) {
-				intel_dp->panel_fixed_mode->type |=
-					DRM_MODE_TYPE_PREFERRED;
-			}
-		}
-		if (intel_dp->panel_fixed_mode) {
-			struct drm_display_mode *mode;
-			mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
+	/* if eDP has no EDID, fall back to fixed mode */
+	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+		struct drm_display_mode *mode;
+		mode = drm_mode_duplicate(dev,
+					  intel_connector->panel.fixed_mode);
+		if (mode) {
 			drm_mode_probed_add(connector, mode);
 			return 1;
 		}
@@ -2322,10 +2388,12 @@ intel_dp_set_property(struct drm_connector *connector,
 		      uint64_t val)
 {
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
-	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 	int ret;
 
-	ret = drm_connector_property_set_value(connector, property, val);
+	ret = drm_object_property_set_value(&connector->base, property, val);
 	if (ret)
 		return ret;
 
@@ -2358,11 +2426,27 @@ intel_dp_set_property(struct drm_connector *connector,
 		goto done;
 	}
 
+	if (is_edp(intel_dp) &&
+	    property == connector->dev->mode_config.scaling_mode_property) {
+		if (val == DRM_MODE_SCALE_NONE) {
+			DRM_DEBUG_KMS("no scaling not supported\n");
+			return -EINVAL;
+		}
+
+		if (intel_connector->panel.fitting_mode == val) {
+			/* the eDP scaling property is not changed */
+			return 0;
+		}
+		intel_connector->panel.fitting_mode = val;
+
+		goto done;
+	}
+
 	return -EINVAL;
 
 done:
-	if (intel_dp->base.base.crtc) {
-		struct drm_crtc *crtc = intel_dp->base.base.crtc;
+	if (intel_encoder->base.crtc) {
+		struct drm_crtc *crtc = intel_encoder->base.crtc;
 		intel_set_mode(crtc, &crtc->mode,
 			       crtc->x, crtc->y, crtc->fb);
 	}
@@ -2375,27 +2459,33 @@ intel_dp_destroy(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_connector *intel_connector = to_intel_connector(connector);
 
-	if (is_edp(intel_dp))
+	if (!IS_ERR_OR_NULL(intel_connector->edid))
+		kfree(intel_connector->edid);
+
+	if (is_edp(intel_dp)) {
 		intel_panel_destroy_backlight(dev);
+		intel_panel_fini(&intel_connector->panel);
+	}
 
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
 
-static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+void intel_dp_encoder_destroy(struct drm_encoder *encoder)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
 
 	i2c_del_adapter(&intel_dp->adapter);
 	drm_encoder_cleanup(encoder);
 	if (is_edp(intel_dp)) {
-		kfree(intel_dp->edid);
 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
 		ironlake_panel_vdd_off_sync(intel_dp);
 	}
-	kfree(intel_dp);
+	kfree(intel_dig_port);
 }
 
 static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -2425,7 +2515,7 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
 static void
 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
 {
-	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
 	intel_dp_check_link_status(intel_dp);
 }
@@ -2435,13 +2525,14 @@ int
 intel_trans_dp_port_sel(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct intel_encoder *encoder;
+	struct intel_encoder *intel_encoder;
+	struct intel_dp *intel_dp;
 
-	for_each_encoder_on_crtc(dev, crtc, encoder) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+		intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
-		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
-		    intel_dp->base.type == INTEL_OUTPUT_EDP)
+		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+		    intel_encoder->type == INTEL_OUTPUT_EDP)
 			return intel_dp->output_reg;
 	}
 
@@ -2471,78 +2562,191 @@ bool intel_dpd_is_edp(struct drm_device *dev)
 static void
 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
 {
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
 	intel_attach_force_audio_property(connector);
 	intel_attach_broadcast_rgb_property(connector);
+
+	if (is_edp(intel_dp)) {
+		drm_mode_create_scaling_mode_property(connector->dev);
+		drm_object_attach_property(
+			&connector->base,
+			connector->dev->mode_config.scaling_mode_property,
+			DRM_MODE_SCALE_ASPECT);
+		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+	}
+}
+
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+				    struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct edp_power_seq cur, vbt, spec, final;
+	u32 pp_on, pp_off, pp_div, pp;
+
+	/* Workaround: Need to write PP_CONTROL with the unlock key as
+	 * the very first thing. */
+	pp = ironlake_get_pp_control(dev_priv);
+	I915_WRITE(PCH_PP_CONTROL, pp);
+
+	pp_on = I915_READ(PCH_PP_ON_DELAYS);
+	pp_off = I915_READ(PCH_PP_OFF_DELAYS);
+	pp_div = I915_READ(PCH_PP_DIVISOR);
+
+	/* Pull timing values out of registers */
+	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+		PANEL_POWER_UP_DELAY_SHIFT;
+
+	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+		PANEL_LIGHT_ON_DELAY_SHIFT;
+
+	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+		PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+		PANEL_POWER_DOWN_DELAY_SHIFT;
+
+	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+
+	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
+	vbt = dev_priv->edp.pps;
+
+	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+	 * our hw here, which are all in 100usec. */
+	spec.t1_t3 = 210 * 10;
+	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
+	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+	spec.t10 = 500 * 10;
+	/* This one is special: the hw stores it in units of 100ms and
+	 * zero based (so we need to add 100 ms). The sw vbt table
+	 * multiplies it by 1000 so that it, too, ends up in units of
+	 * 100usec. */
+	spec.t11_t12 = (510 + 100) * 10;
+
+	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+
+	/* Use the max of the register settings and vbt. If both are
+	 * unset, fall back to the spec limits. */
+#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
+				       spec.field : \
+				       max(cur.field, vbt.field))
+	assign_final(t1_t3);
+	assign_final(t8);
+	assign_final(t9);
+	assign_final(t10);
+	assign_final(t11_t12);
+#undef assign_final
+
+#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
+	intel_dp->panel_power_up_delay = get_delay(t1_t3);
+	intel_dp->backlight_on_delay = get_delay(t8);
+	intel_dp->backlight_off_delay = get_delay(t9);
+	intel_dp->panel_power_down_delay = get_delay(t10);
+	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
+#undef get_delay
+
+	/* And finally store the new values in the power sequencer. */
+	pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
+		(final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
+	pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+		 (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
+	/* Compute the divisor for the pp clock, simply match the Bspec
+	 * formula. */
+	pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
+			<< PP_REFERENCE_DIVIDER_SHIFT;
+	pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000)
+			<< PANEL_POWER_CYCLE_DELAY_SHIFT);
+
+	/* Haswell doesn't have any port selection bits for the panel
+	 * power sequencer any more. */
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+		if (is_cpu_edp(intel_dp))
+			pp_on |= PANEL_POWER_PORT_DP_A;
+		else
+			pp_on |= PANEL_POWER_PORT_DP_D;
+	}
+
+	I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
+	I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
+	I915_WRITE(PCH_PP_DIVISOR, pp_div);
+
+	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+		      intel_dp->panel_power_cycle_delay);
+
+	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+		      I915_READ(PCH_PP_ON_DELAYS),
+		      I915_READ(PCH_PP_OFF_DELAYS),
+		      I915_READ(PCH_PP_DIVISOR));
 }
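The assign_final()/get_delay() pair above reduces, per field, to: take the larger of the current register value and the VBT, fall back to the spec limit only when both are zero, then convert the hw's 100 us units to ms. A hypothetical one-field condensation with a worked example:

static int sketch_final_delay_ms(int cur, int vbt, int spec)
{
	int final = (max(cur, vbt) == 0) ? spec : max(cur, vbt);

	/* e.g. cur = 0, vbt = 2100 -> DIV_ROUND_UP(2100, 10) = 210 ms;
	 * only if both were zero would the spec limit (2100 for t1_t3,
	 * i.e. 210 ms) apply instead. */
	return DIV_ROUND_UP(final, 10);
}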
 
 void
-intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+			struct intel_connector *intel_connector)
 {
+	struct drm_connector *connector = &intel_connector->base;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_connector *connector;
-	struct intel_dp *intel_dp;
-	struct intel_encoder *intel_encoder;
-	struct intel_connector *intel_connector;
+	struct drm_display_mode *fixed_mode = NULL;
+	enum port port = intel_dig_port->port;
 	const char *name = NULL;
 	int type;
 
-	intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
-	if (!intel_dp)
-		return;
-
-	intel_dp->output_reg = output_reg;
-	intel_dp->port = port;
 	/* Preserve the current hw state. */
 	intel_dp->DP = I915_READ(intel_dp->output_reg);
+	intel_dp->attached_connector = intel_connector;
 
-	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
-	if (!intel_connector) {
-		kfree(intel_dp);
-		return;
-	}
-	intel_encoder = &intel_dp->base;
-
-	if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
+	if (HAS_PCH_SPLIT(dev) && port == PORT_D)
 		if (intel_dpd_is_edp(dev))
 			intel_dp->is_pch_edp = true;
 
-	if (output_reg == DP_A || is_pch_edp(intel_dp)) {
+	/*
+	 * FIXME: We need to initialize built-in panels before external panels.
+	 * For X0, DP_C is fixed as eDP. Revisit this as part of the VLV eDP
+	 * cleanup.
+	 */
+	if (IS_VALLEYVIEW(dev) && port == PORT_C) {
+		type = DRM_MODE_CONNECTOR_eDP;
+		intel_encoder->type = INTEL_OUTPUT_EDP;
+	} else if (port == PORT_A || is_pch_edp(intel_dp)) {
 		type = DRM_MODE_CONNECTOR_eDP;
 		intel_encoder->type = INTEL_OUTPUT_EDP;
 	} else {
+		/* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
+		 * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
+		 * rewrite it.
+		 */
 		type = DRM_MODE_CONNECTOR_DisplayPort;
-		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
 	}
 
-	connector = &intel_connector->base;
 	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
 	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
 
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
-
-	intel_encoder->cloneable = false;
+	connector->interlace_allowed = true;
+	connector->doublescan_allowed = 0;
 
 	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
 			  ironlake_panel_vdd_work);
 
-	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
-
-	connector->interlace_allowed = true;
-	connector->doublescan_allowed = 0;
-
-	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
-			 DRM_MODE_ENCODER_TMDS);
-	drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
-
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	drm_sysfs_connector_add(connector);
 
-	intel_encoder->enable = intel_enable_dp;
-	intel_encoder->pre_enable = intel_pre_enable_dp;
-	intel_encoder->disable = intel_disable_dp;
-	intel_encoder->post_disable = intel_post_disable_dp;
-	intel_encoder->get_hw_state = intel_dp_get_hw_state;
-	intel_connector->get_hw_state = intel_connector_get_hw_state;
+	if (IS_HASWELL(dev))
+		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+	else
+		intel_connector->get_hw_state = intel_connector_get_hw_state;
 
 	/* Set up the DDC bus. */
 	switch (port) {
@@ -2566,66 +2770,15 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 		break;
 	}
 
-	/* Cache some DPCD data in the eDP case */
-	if (is_edp(intel_dp)) {
-		struct edp_power_seq	cur, vbt;
-		u32 pp_on, pp_off, pp_div;
-
-		pp_on = I915_READ(PCH_PP_ON_DELAYS);
-		pp_off = I915_READ(PCH_PP_OFF_DELAYS);
-		pp_div = I915_READ(PCH_PP_DIVISOR);
-
-		if (!pp_on || !pp_off || !pp_div) {
-			DRM_INFO("bad panel power sequencing delays, disabling panel\n");
-			intel_dp_encoder_destroy(&intel_dp->base.base);
-			intel_dp_destroy(&intel_connector->base);
-			return;
-		}
-
-		/* Pull timing values out of registers */
-		cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
-			PANEL_POWER_UP_DELAY_SHIFT;
-
-		cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
-			PANEL_LIGHT_ON_DELAY_SHIFT;
-
-		cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
-			PANEL_LIGHT_OFF_DELAY_SHIFT;
-
-		cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
-			PANEL_POWER_DOWN_DELAY_SHIFT;
-
-		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
-			       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
-
-		DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
-			      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
-
-		vbt = dev_priv->edp.pps;
-
-		DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
-			      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
-
-#define get_delay(field)	((max(cur.field, vbt.field) + 9) / 10)
-
-		intel_dp->panel_power_up_delay = get_delay(t1_t3);
-		intel_dp->backlight_on_delay = get_delay(t8);
-		intel_dp->backlight_off_delay = get_delay(t9);
-		intel_dp->panel_power_down_delay = get_delay(t10);
-		intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
-
-		DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
-			      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
-			      intel_dp->panel_power_cycle_delay);
-
-		DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
-			      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
-	}
+	if (is_edp(intel_dp))
+		intel_dp_init_panel_power_sequencer(dev, intel_dp);
 
 	intel_dp_i2c_init(intel_dp, intel_connector, name);
 
+	/* Cache DPCD and EDID for eDP. */
 	if (is_edp(intel_dp)) {
 		bool ret;
+		struct drm_display_mode *scan;
 		struct edid *edid;
 
 		ironlake_edp_panel_vdd_on(intel_dp);
@@ -2640,29 +2793,47 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 		} else {
 			/* if this fails, presume the device is a ghost */
 			DRM_INFO("failed to retrieve link info, disabling eDP\n");
-			intel_dp_encoder_destroy(&intel_dp->base.base);
-			intel_dp_destroy(&intel_connector->base);
+			intel_dp_encoder_destroy(&intel_encoder->base);
+			intel_dp_destroy(connector);
 			return;
 		}
 
 		ironlake_edp_panel_vdd_on(intel_dp);
 		edid = drm_get_edid(connector, &intel_dp->adapter);
 		if (edid) {
-			drm_mode_connector_update_edid_property(connector,
-								edid);
-			intel_dp->edid_mode_count =
-				drm_add_edid_modes(connector, edid);
-			drm_edid_to_eld(connector, edid);
-			intel_dp->edid = edid;
+			if (drm_add_edid_modes(connector, edid)) {
+				drm_mode_connector_update_edid_property(connector, edid);
+				drm_edid_to_eld(connector, edid);
+			} else {
+				kfree(edid);
+				edid = ERR_PTR(-EINVAL);
+			}
+		} else {
+			edid = ERR_PTR(-ENOENT);
 		}
+		intel_connector->edid = edid;
+
+		/* prefer fixed mode from EDID if available */
+		list_for_each_entry(scan, &connector->probed_modes, head) {
+			if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
+				fixed_mode = drm_mode_duplicate(dev, scan);
+				break;
+			}
+		}
+
+		/* fallback to VBT if available for eDP */
+		if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
+			fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+			if (fixed_mode)
+				fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+		}
+
 		ironlake_edp_panel_vdd_off(intel_dp, false);
 	}
 
-	intel_encoder->hot_plug = intel_dp_hot_plug;
-
 	if (is_edp(intel_dp)) {
-		dev_priv->int_edp_connector = connector;
-		intel_panel_setup_backlight(dev);
+		intel_panel_init(&intel_connector->panel, fixed_mode);
+		intel_panel_setup_backlight(connector);
 	}
 
 	intel_dp_add_properties(intel_dp, connector);
@@ -2676,3 +2847,45 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
 	}
 }
+
+void
+intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+{
+	struct intel_digital_port *intel_dig_port;
+	struct intel_encoder *intel_encoder;
+	struct drm_encoder *encoder;
+	struct intel_connector *intel_connector;
+
+	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+	if (!intel_dig_port)
+		return;
+
+	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	if (!intel_connector) {
+		kfree(intel_dig_port);
+		return;
+	}
+
+	intel_encoder = &intel_dig_port->base;
+	encoder = &intel_encoder->base;
+
+	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+	drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+
+	intel_encoder->enable = intel_enable_dp;
+	intel_encoder->pre_enable = intel_pre_enable_dp;
+	intel_encoder->disable = intel_disable_dp;
+	intel_encoder->post_disable = intel_post_disable_dp;
+	intel_encoder->get_hw_state = intel_dp_get_hw_state;
+
+	intel_dig_port->port = port;
+	intel_dig_port->dp.output_reg = output_reg;
+
+	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+	intel_encoder->cloneable = false;
+	intel_encoder->hot_plug = intel_dp_hot_plug;
+
+	intel_dp_init_connector(intel_dig_port, intel_connector);
+}
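
The reworked eDP probe above caches either a usable EDID or an encoded errno in the same intel_connector->edid pointer, distinguishing "fetched but invalid" (-EINVAL) from "absent" (-ENOENT) without an extra flag. A minimal standalone sketch of the ERR_PTR()/IS_ERR_OR_NULL() convention this relies on; the helpers are re-implemented here purely for illustration, the driver uses the kernel's own from <linux/err.h>:

	#include <stdio.h>

	#define MAX_ERRNO	4095

	/* Pointers in the top page are never valid allocations, so the
	 * pointer value can carry a negative errno instead. */
	static inline void *err_ptr(long error)
	{
		return (void *)error;
	}

	static inline long ptr_err(const void *ptr)
	{
		return (long)ptr;
	}

	static inline int is_err_or_null(const void *ptr)
	{
		return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	int main(void)
	{
		void *edid = err_ptr(-22);	/* like ERR_PTR(-EINVAL) above */

		if (is_err_or_null(edid))
			printf("no usable EDID, err=%ld\n", ptr_err(edid));
		return 0;
	}
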
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index fe7142502f43..8a1bd4a3ad0d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -94,6 +94,7 @@
 #define INTEL_OUTPUT_HDMI 6
 #define INTEL_OUTPUT_DISPLAYPORT 7
 #define INTEL_OUTPUT_EDP 8
+#define INTEL_OUTPUT_UNKNOWN 9
 
 #define INTEL_DVO_CHIP_NONE 0
 #define INTEL_DVO_CHIP_LVDS 1
@@ -163,6 +164,11 @@ struct intel_encoder {
 	int crtc_mask;
 };
 
+struct intel_panel {
+	struct drm_display_mode *fixed_mode;
+	int fitting_mode;
+};
+
 struct intel_connector {
 	struct drm_connector base;
 	/*
@@ -179,12 +185,19 @@ struct intel_connector {
 	/* Reads out the current hw, returning true if the connector is enabled
 	 * and active (i.e. dpms ON state). */
 	bool (*get_hw_state)(struct intel_connector *);
+
+	/* Panel info for eDP and LVDS */
+	struct intel_panel panel;
+
+	/* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
+	struct edid *edid;
 };
 
 struct intel_crtc {
 	struct drm_crtc base;
 	enum pipe pipe;
 	enum plane plane;
+	enum transcoder cpu_transcoder;
 	u8 lut_r[256], lut_g[256], lut_b[256];
 	/*
 	 * Whether the crtc and the connected output pipeline is active. Implies
@@ -198,6 +211,8 @@ struct intel_crtc {
 	struct intel_unpin_work *unpin_work;
 	int fdi_lanes;
 
+	atomic_t unpin_work_count;
+
 	/* Display surface base address adjustment for pageflips. Note that on
 	 * gen4+ this only adjusts up to a tile, offsets within a tile are
 	 * handled in the hw itself (with the TILEOFF register). */
@@ -212,12 +227,14 @@ struct intel_crtc {
 
 	/* We can share PLLs across outputs if the timings match */
 	struct intel_pch_pll *pch_pll;
+	uint32_t ddi_pll_sel;
 };
 
 struct intel_plane {
 	struct drm_plane base;
 	enum pipe pipe;
 	struct drm_i915_gem_object *obj;
+	bool can_scale;
 	int max_downscale;
 	u32 lut_r[1024], lut_g[1024], lut_b[1024];
 	void (*update_plane)(struct drm_plane *plane,
@@ -317,10 +334,8 @@ struct dip_infoframe {
 } __attribute__((packed));
 
 struct intel_hdmi {
-	struct intel_encoder base;
 	u32 sdvox_reg;
 	int ddc_bus;
-	int ddi_port;
 	uint32_t color_range;
 	bool has_hdmi_sink;
 	bool has_audio;
@@ -331,18 +346,15 @@ struct intel_hdmi {
 			       struct drm_display_mode *adjusted_mode);
 };
 
-#define DP_RECEIVER_CAP_SIZE		0xf
 #define DP_MAX_DOWNSTREAM_PORTS		0x10
 #define DP_LINK_CONFIGURATION_SIZE	9
 
 struct intel_dp {
-	struct intel_encoder base;
 	uint32_t output_reg;
 	uint32_t DP;
 	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
 	bool has_audio;
 	enum hdmi_force_audio force_audio;
-	enum port port;
 	uint32_t color_range;
 	uint8_t link_bw;
 	uint8_t lane_count;
@@ -357,11 +369,16 @@ struct intel_dp {
 	int panel_power_cycle_delay;
 	int backlight_on_delay;
 	int backlight_off_delay;
-	struct drm_display_mode *panel_fixed_mode;  /* for eDP */
 	struct delayed_work panel_vdd_work;
 	bool want_panel_vdd;
-	struct edid *edid; /* cached EDID for eDP */
-	int edid_mode_count;
+	struct intel_connector *attached_connector;
+};
+
+struct intel_digital_port {
+	struct intel_encoder base;
+	enum port port;
+	struct intel_dp dp;
+	struct intel_hdmi hdmi;
 };
 
 static inline struct drm_crtc *
@@ -380,11 +397,14 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
 
 struct intel_unpin_work {
 	struct work_struct work;
-	struct drm_device *dev;
+	struct drm_crtc *crtc;
 	struct drm_i915_gem_object *old_fb_obj;
 	struct drm_i915_gem_object *pending_flip_obj;
 	struct drm_pending_vblank_event *event;
-	int pending;
+	atomic_t pending;
+#define INTEL_FLIP_INACTIVE	0
+#define INTEL_FLIP_PENDING	1
+#define INTEL_FLIP_COMPLETE	2
 	bool enable_stall_check;
 };
 
@@ -395,6 +415,8 @@ struct intel_fbc_work {
 	int interval;
 };
 
+int intel_pch_rawclk(struct drm_device *dev);
+
 int intel_connector_update_modes(struct drm_connector *connector,
 				struct edid *edid);
 int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
@@ -405,7 +427,12 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
 extern void intel_crt_init(struct drm_device *dev);
 extern void intel_hdmi_init(struct drm_device *dev,
 			    int sdvox_reg, enum port port);
+extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+				      struct intel_connector *intel_connector);
 extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode);
 extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
 extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
 			    bool is_sdvob);
@@ -418,10 +445,27 @@ extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
 extern bool intel_lvds_init(struct drm_device *dev);
 extern void intel_dp_init(struct drm_device *dev, int output_reg,
 			  enum port port);
+extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+				    struct intel_connector *intel_connector);
 void
 intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 		 struct drm_display_mode *adjusted_mode);
+extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
+extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
+extern bool intel_dp_mode_fixup(struct drm_encoder *encoder,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode);
 extern bool intel_dpd_is_edp(struct drm_device *dev);
+extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
 extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
 extern int intel_edp_target_clock(struct intel_encoder *,
 				  struct drm_display_mode *mode);
@@ -431,6 +475,10 @@ extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
 				      enum plane plane);
 
 /* intel_panel.c */
+extern int intel_panel_init(struct intel_panel *panel,
+			    struct drm_display_mode *fixed_mode);
+extern void intel_panel_fini(struct intel_panel *panel);
+
 extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
 				   struct drm_display_mode *adjusted_mode);
 extern void intel_pch_panel_fitting(struct drm_device *dev,
@@ -439,7 +487,7 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
 				    struct drm_display_mode *adjusted_mode);
 extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
 extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
-extern int intel_panel_setup_backlight(struct drm_device *dev);
+extern int intel_panel_setup_backlight(struct drm_connector *connector);
 extern void intel_panel_enable_backlight(struct drm_device *dev,
 					 enum pipe pipe);
 extern void intel_panel_disable_backlight(struct drm_device *dev);
@@ -473,6 +521,31 @@ static inline struct intel_encoder *intel_attached_encoder(struct drm_connector
 	return to_intel_connector(connector)->encoder;
 }
 
+static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+{
+	struct intel_digital_port *intel_dig_port =
+		container_of(encoder, struct intel_digital_port, base.base);
+	return &intel_dig_port->dp;
+}
+
+static inline struct intel_digital_port *
+enc_to_dig_port(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct intel_digital_port, base.base);
+}
+
+static inline struct intel_digital_port *
+dp_to_dig_port(struct intel_dp *intel_dp)
+{
+	return container_of(intel_dp, struct intel_digital_port, dp);
+}
+
+static inline struct intel_digital_port *
+hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
+{
+	return container_of(intel_hdmi, struct intel_digital_port, hdmi);
+}
+
 extern void intel_connector_attach_encoder(struct intel_connector *connector,
 					   struct intel_encoder *encoder);
 extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -481,8 +554,12 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 						    struct drm_crtc *crtc);
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
+extern enum transcoder
+intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+			     enum pipe pipe);
 extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
 extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
 
 struct intel_load_detect_pipe {
 	struct drm_framebuffer *release_fb;
@@ -550,6 +627,10 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
 extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
 			 struct drm_display_mode *mode);
 
+extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
+						      unsigned int bpp,
+						      unsigned int pitch);
+
 extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
 extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
@@ -573,12 +654,22 @@ extern void intel_disable_gt_powersave(struct drm_device *dev);
 extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
 extern void ironlake_teardown_rc6(struct drm_device *dev);
 
-extern void intel_enable_ddi(struct intel_encoder *encoder);
-extern void intel_disable_ddi(struct intel_encoder *encoder);
 extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 				   enum pipe *pipe);
-extern void intel_ddi_mode_set(struct drm_encoder *encoder,
-				struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode);
+extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
+extern void intel_ddi_pll_init(struct drm_device *dev);
+extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc);
+extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+					      enum transcoder cpu_transcoder);
+extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
+extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
+extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
+extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+extern bool
+intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
 
 #endif /* __INTEL_DRV_H__ */
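
struct intel_digital_port now embeds the shared intel_encoder alongside both the DP and HDMI state, and enc_to_dig_port()/dp_to_dig_port()/hdmi_to_dig_port() recover the containing structure from a pointer to any embedded member. A standalone sketch of the container_of() arithmetic behind those helpers (macro re-implemented for illustration):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct encoder { int id; };
	struct dp      { int output_reg; };

	/* Mirrors struct intel_digital_port: one allocation holding the
	 * common encoder state and the per-function state. */
	struct digital_port {
		struct encoder base;
		struct dp dp;
	};

	static struct dp *enc_to_dp(struct encoder *enc)
	{
		return &container_of(enc, struct digital_port, base)->dp;
	}

	int main(void)
	{
		struct digital_port port = {
			.base = { .id = 1 },
			.dp = { .output_reg = 0x64000 },
		};

		/* Recover the DP state from the embedded encoder without
		 * storing any back-pointer. */
		printf("output_reg = %#x\n", enc_to_dp(&port.base)->output_reg);
		return 0;
	}
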
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 9ba0aaed7ee8..2ee9821b9d93 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -36,10 +36,15 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
+static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
+{
+	return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
+}
+
 static void
 assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
 {
-	struct drm_device *dev = intel_hdmi->base.base.dev;
+	struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t enabled_bits;
 
@@ -51,13 +56,14 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
 
 struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
 {
-	return container_of(encoder, struct intel_hdmi, base.base);
+	struct intel_digital_port *intel_dig_port =
+		container_of(encoder, struct intel_digital_port, base.base);
+	return &intel_dig_port->hdmi;
 }
 
 static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
 {
-	return container_of(intel_attached_encoder(connector),
-			    struct intel_hdmi, base);
+	return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
 }
 
 void intel_dip_infoframe_csum(struct dip_infoframe *frame)
@@ -334,6 +340,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
 		avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
 
+	avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
+
 	intel_set_infoframe(encoder, &avi_if);
 }
 
@@ -754,16 +762,16 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
-				  const struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted_mode)
+bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+			   const struct drm_display_mode *mode,
+			   struct drm_display_mode *adjusted_mode)
 {
 	return true;
 }
 
 static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
 {
-	struct drm_device *dev = intel_hdmi->base.base.dev;
+	struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t bit;
 
@@ -786,6 +794,9 @@ static enum drm_connector_status
 intel_hdmi_detect(struct drm_connector *connector, bool force)
 {
 	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct intel_digital_port *intel_dig_port =
+		hdmi_to_dig_port(intel_hdmi);
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	struct edid *edid;
 	enum drm_connector_status status = connector_status_disconnected;
@@ -814,6 +825,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
 		if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
 			intel_hdmi->has_audio =
 				(intel_hdmi->force_audio == HDMI_AUDIO_ON);
+		intel_encoder->type = INTEL_OUTPUT_HDMI;
 	}
 
 	return status;
@@ -859,10 +871,12 @@ intel_hdmi_set_property(struct drm_connector *connector,
 			uint64_t val)
 {
 	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct intel_digital_port *intel_dig_port =
+		hdmi_to_dig_port(intel_hdmi);
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	int ret;
 
-	ret = drm_connector_property_set_value(connector, property, val);
+	ret = drm_object_property_set_value(&connector->base, property, val);
 	if (ret)
 		return ret;
 
@@ -898,8 +912,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
 	return -EINVAL;
 
 done:
-	if (intel_hdmi->base.base.crtc) {
-		struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
+	if (intel_dig_port->base.base.crtc) {
+		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
 		intel_set_mode(crtc, &crtc->mode,
 			       crtc->x, crtc->y, crtc->fb);
 	}
@@ -914,12 +928,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
 	kfree(connector);
 }
 
-static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
-	.mode_fixup = intel_hdmi_mode_fixup,
-	.mode_set = intel_ddi_mode_set,
-	.disable = intel_encoder_noop,
-};
-
 static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
 	.mode_fixup = intel_hdmi_mode_fixup,
 	.mode_set = intel_hdmi_mode_set,
@@ -951,43 +959,24 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
 	intel_attach_broadcast_rgb_property(connector);
 }
 
-void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+			       struct intel_connector *intel_connector)
 {
+	struct drm_connector *connector = &intel_connector->base;
+	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_connector *connector;
-	struct intel_encoder *intel_encoder;
-	struct intel_connector *intel_connector;
-	struct intel_hdmi *intel_hdmi;
+	enum port port = intel_dig_port->port;
 
-	intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
-	if (!intel_hdmi)
-		return;
-
-	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
-	if (!intel_connector) {
-		kfree(intel_hdmi);
-		return;
-	}
-
-	intel_encoder = &intel_hdmi->base;
-	drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
-			 DRM_MODE_ENCODER_TMDS);
-
-	connector = &intel_connector->base;
 	drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
 			   DRM_MODE_CONNECTOR_HDMIA);
 	drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
 
-	intel_encoder->type = INTEL_OUTPUT_HDMI;
-
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 	connector->interlace_allowed = 1;
 	connector->doublescan_allowed = 0;
-	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
-	intel_encoder->cloneable = false;
-
-	intel_hdmi->ddi_port = port;
 	switch (port) {
 	case PORT_B:
 		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
@@ -1007,8 +996,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
 		BUG();
 	}
 
-	intel_hdmi->sdvox_reg = sdvox_reg;
-
 	if (!HAS_PCH_SPLIT(dev)) {
 		intel_hdmi->write_infoframe = g4x_write_infoframe;
 		intel_hdmi->set_infoframes = g4x_set_infoframes;
@@ -1026,21 +1013,10 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
 		intel_hdmi->set_infoframes = cpt_set_infoframes;
 	}
 
-	if (IS_HASWELL(dev)) {
-		intel_encoder->enable = intel_enable_ddi;
-		intel_encoder->disable = intel_disable_ddi;
-		intel_encoder->get_hw_state = intel_ddi_get_hw_state;
-		drm_encoder_helper_add(&intel_encoder->base,
-				       &intel_hdmi_helper_funcs_hsw);
-	} else {
-		intel_encoder->enable = intel_enable_hdmi;
-		intel_encoder->disable = intel_disable_hdmi;
-		intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
-		drm_encoder_helper_add(&intel_encoder->base,
-				       &intel_hdmi_helper_funcs);
-	}
-	intel_connector->get_hw_state = intel_connector_get_hw_state;
-
+	if (IS_HASWELL(dev))
+		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+	else
+		intel_connector->get_hw_state = intel_connector_get_hw_state;
 
 	intel_hdmi_add_properties(intel_hdmi, connector);
 
@@ -1056,3 +1032,42 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
 	}
 }
+
+void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
+{
+	struct intel_digital_port *intel_dig_port;
+	struct intel_encoder *intel_encoder;
+	struct drm_encoder *encoder;
+	struct intel_connector *intel_connector;
+
+	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+	if (!intel_dig_port)
+		return;
+
+	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	if (!intel_connector) {
+		kfree(intel_dig_port);
+		return;
+	}
+
+	intel_encoder = &intel_dig_port->base;
+	encoder = &intel_encoder->base;
+
+	drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+	drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+
+	intel_encoder->enable = intel_enable_hdmi;
+	intel_encoder->disable = intel_disable_hdmi;
+	intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+
+	intel_encoder->type = INTEL_OUTPUT_HDMI;
+	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+	intel_encoder->cloneable = false;
+
+	intel_dig_port->port = port;
+	intel_dig_port->hdmi.sdvox_reg = sdvox_reg;
+	intel_dig_port->dp.output_reg = 0;
+
+	intel_hdmi_init_connector(intel_dig_port, intel_connector);
+}
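
intel_hdmi_init() now builds the same intel_digital_port that intel_dp_init() does, with the DP half left unconfigured (dp.output_reg = 0). A toy version of that "zero register means the function is absent" convention, with hypothetical names and register values:

	#include <stdio.h>

	struct digital_port {
		int port;
		int hdmi_sdvox_reg;	/* nonzero when the HDMI half is wired up */
		int dp_output_reg;	/* 0: no DP function behind this port */
	};

	static void describe(const struct digital_port *p)
	{
		printf("port %c:%s%s\n", 'A' + p->port,
		       p->hdmi_sdvox_reg ? " hdmi" : "",
		       p->dp_output_reg ? " dp" : "");
	}

	int main(void)
	{
		struct digital_port hdmi_only = { .port = 1, .hdmi_sdvox_reg = 0x61140 };
		struct digital_port dp_only   = { .port = 2, .dp_output_reg = 0x64100 };

		describe(&hdmi_only);	/* prints: port B: hdmi */
		describe(&dp_only);	/* prints: port C: dp */
		return 0;
	}
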
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index c2c6dbc0971c..3ef5af15b812 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -432,7 +432,7 @@ timeout:
 	I915_WRITE(GMBUS0 + reg_offset, 0);
 
 	/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
-	bus->force_bit = true;
+	bus->force_bit = 1;
 	ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
 
 out:
@@ -491,7 +491,7 @@ int intel_setup_gmbus(struct drm_device *dev)
 
 		/* gmbus seems to be broken on i830 */
 		if (IS_I830(dev))
-			bus->force_bit = true;
+			bus->force_bit = 1;
 
 		intel_gpio_setup(bus, port);
 
@@ -532,7 +532,10 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
 {
 	struct intel_gmbus *bus = to_intel_gmbus(adapter);
 
-	bus->force_bit = force_bit;
+	bus->force_bit += force_bit ? 1 : -1;
+	DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
+		      force_bit ? "en" : "dis", adapter->name,
+		      bus->force_bit);
 }
 
 void intel_teardown_gmbus(struct drm_device *dev)
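
force_bit becomes a counter instead of a bool, so paired intel_gmbus_force_bit(adapter, true)/(..., false) calls nest: bit-banging stays forced until the last requester releases it, rather than the most recent call winning. The same pattern in a standalone sketch:

	#include <assert.h>
	#include <stdio.h>

	struct bus { int force_bit; };

	/* Enable/disable requests balance like a refcount; the bus uses
	 * bit-banging whenever the count is above zero. */
	static void gmbus_force_bit(struct bus *bus, int force)
	{
		bus->force_bit += force ? 1 : -1;
		assert(bus->force_bit >= 0);	/* unbalanced release is a bug */
	}

	int main(void)
	{
		struct bus bus = { 0 };

		gmbus_force_bit(&bus, 1);	/* first user forces bit-banging */
		gmbus_force_bit(&bus, 1);	/* second user nests */
		gmbus_force_bit(&bus, 0);
		printf("forced = %d\n", bus.force_bit != 0);	/* still 1 */
		gmbus_force_bit(&bus, 0);
		printf("forced = %d\n", bus.force_bit != 0);	/* back to 0 */
		return 0;
	}
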
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index edba93b3474b..b9a660a53677 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -40,28 +40,30 @@
 #include <linux/acpi.h>
 
 /* Private structure for the integrated LVDS support */
-struct intel_lvds {
+struct intel_lvds_connector {
+	struct intel_connector base;
+
+	struct notifier_block lid_notifier;
+};
+
+struct intel_lvds_encoder {
 	struct intel_encoder base;
 
-	struct edid *edid;
-
-	int fitting_mode;
 	u32 pfit_control;
 	u32 pfit_pgm_ratios;
 	bool pfit_dirty;
 
-	struct drm_display_mode *fixed_mode;
+	struct intel_lvds_connector *attached_connector;
 };
 
-static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
+static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
 {
-	return container_of(encoder, struct intel_lvds, base.base);
+	return container_of(encoder, struct intel_lvds_encoder, base.base);
 }
 
-static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
+static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
 {
-	return container_of(intel_attached_encoder(connector),
-			    struct intel_lvds, base);
+	return container_of(connector, struct intel_lvds_connector, base.base);
 }
 
 static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
@@ -96,7 +98,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
 static void intel_enable_lvds(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 ctl_reg, lvds_reg, stat_reg;
@@ -113,7 +115,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
 
 	I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
 
-	if (intel_lvds->pfit_dirty) {
+	if (lvds_encoder->pfit_dirty) {
 		/*
 		 * Enable automatic panel scaling so that non-native modes
 		 * fill the screen.  The panel fitter should only be
 		 * adjusted whilst the pipe is disabled, according to
 		 * register description and PRM.
 		 * register description and PRM.
 		 */
 		DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
-			      intel_lvds->pfit_control,
-			      intel_lvds->pfit_pgm_ratios);
+			      lvds_encoder->pfit_control,
+			      lvds_encoder->pfit_pgm_ratios);
 
-		I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
-		I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
-		intel_lvds->pfit_dirty = false;
+		I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
+		I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
+		lvds_encoder->pfit_dirty = false;
 	}
 
 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
@@ -140,7 +142,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
 static void intel_disable_lvds(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 ctl_reg, lvds_reg, stat_reg;
 
@@ -160,9 +162,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 	if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
 		DRM_ERROR("timed out waiting for panel to power off\n");
 
-	if (intel_lvds->pfit_control) {
+	if (lvds_encoder->pfit_control) {
 		I915_WRITE(PFIT_CONTROL, 0);
-		intel_lvds->pfit_dirty = true;
+		lvds_encoder->pfit_dirty = true;
 	}
 
 	I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
@@ -172,8 +174,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 static int intel_lvds_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
-	struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
-	struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 
 	if (mode->hdisplay > fixed_mode->hdisplay)
 		return MODE_PANEL;
@@ -249,8 +251,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-	struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc;
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
+	struct intel_connector *intel_connector =
+		&lvds_encoder->attached_connector->base;
+	struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
 	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
 	int pipe;
 
@@ -260,7 +264,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 		return false;
 	}
 
-	if (intel_encoder_check_is_cloned(&intel_lvds->base))
+	if (intel_encoder_check_is_cloned(&lvds_encoder->base))
 		return false;
 
 	/*
@@ -269,10 +273,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 	 * with the panel scaling set up to source from the H/VDisplay
 	 * of the original mode.
 	 */
-	intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
+	intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+			       adjusted_mode);
 
 	if (HAS_PCH_SPLIT(dev)) {
-		intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
+		intel_pch_panel_fitting(dev,
+					intel_connector->panel.fitting_mode,
 					mode, adjusted_mode);
 		return true;
 	}
@@ -298,7 +304,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 
 	drm_mode_set_crtcinfo(adjusted_mode, 0);
 
-	switch (intel_lvds->fitting_mode) {
+	switch (intel_connector->panel.fitting_mode) {
 	case DRM_MODE_SCALE_CENTER:
 		/*
 		 * For centered modes, we have to calculate border widths &
@@ -396,11 +402,11 @@ out:
 	if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
 		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
 
-	if (pfit_control != intel_lvds->pfit_control ||
-	    pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
-		intel_lvds->pfit_control = pfit_control;
-		intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
-		intel_lvds->pfit_dirty = true;
+	if (pfit_control != lvds_encoder->pfit_control ||
+	    pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
+		lvds_encoder->pfit_control = pfit_control;
+		lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
+		lvds_encoder->pfit_dirty = true;
 	}
 	dev_priv->lvds_border_bits = border;
 
@@ -449,14 +455,15 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
  */
 static int intel_lvds_get_modes(struct drm_connector *connector)
 {
-	struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+	struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *mode;
 
-	if (intel_lvds->edid)
-		return drm_add_edid_modes(connector, intel_lvds->edid);
+	/* use cached edid if we have one */
+	if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+		return drm_add_edid_modes(connector, lvds_connector->base.edid);
 
-	mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
+	mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
 	if (mode == NULL)
 		return 0;
 
@@ -496,10 +503,11 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = {
 static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
 			    void *unused)
 {
-	struct drm_i915_private *dev_priv =
-		container_of(nb, struct drm_i915_private, lid_notifier);
-	struct drm_device *dev = dev_priv->dev;
-	struct drm_connector *connector = dev_priv->int_lvds_connector;
+	struct intel_lvds_connector *lvds_connector =
+		container_of(nb, struct intel_lvds_connector, lid_notifier);
+	struct drm_connector *connector = &lvds_connector->base.base;
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
 		return NOTIFY_OK;
@@ -508,9 +516,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
 	 * check and update the status of LVDS connector after receiving
 	 * the LID notification event.
 	 */
-	if (connector)
-		connector->status = connector->funcs->detect(connector,
-							     false);
+	connector->status = connector->funcs->detect(connector, false);
 
 	/* Don't force modeset on machines where it causes a GPU lockup */
 	if (dmi_check_system(intel_no_modeset_on_lid))
@@ -526,7 +532,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
 	dev_priv->modeset_on_lid = 0;
 
 	mutex_lock(&dev->mode_config.mutex);
-	intel_modeset_check_state(dev);
+	intel_modeset_setup_hw_state(dev, true);
 	mutex_unlock(&dev->mode_config.mutex);
 
 	return NOTIFY_OK;
@@ -541,13 +547,18 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
  */
 static void intel_lvds_destroy(struct drm_connector *connector)
 {
-	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_lvds_connector *lvds_connector =
+		to_lvds_connector(connector);
 
-	intel_panel_destroy_backlight(dev);
+	if (lvds_connector->lid_notifier.notifier_call)
+		acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
+
+	if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+		kfree(lvds_connector->base.edid);
+
+	intel_panel_destroy_backlight(connector->dev);
+	intel_panel_fini(&lvds_connector->base.panel);
 
-	if (dev_priv->lid_notifier.notifier_call)
-		acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
@@ -557,22 +568,24 @@ static int intel_lvds_set_property(struct drm_connector *connector,
 				   struct drm_property *property,
 				   uint64_t value)
 {
-	struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct drm_device *dev = connector->dev;
 
 	if (property == dev->mode_config.scaling_mode_property) {
-		struct drm_crtc *crtc = intel_lvds->base.base.crtc;
+		struct drm_crtc *crtc;
 
 		if (value == DRM_MODE_SCALE_NONE) {
 			DRM_DEBUG_KMS("no scaling not supported\n");
 			return -EINVAL;
 		}
 
-		if (intel_lvds->fitting_mode == value) {
+		if (intel_connector->panel.fitting_mode == value) {
 			/* the LVDS scaling property is not changed */
 			return 0;
 		}
-		intel_lvds->fitting_mode = value;
+		intel_connector->panel.fitting_mode = value;
+
+		crtc = intel_attached_encoder(connector)->base.crtc;
 		if (crtc && crtc->enabled) {
 			/*
 			 * If the CRTC is enabled, the display will be changed
@@ -912,12 +925,15 @@ static bool intel_lvds_supported(struct drm_device *dev)
 bool intel_lvds_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_lvds *intel_lvds;
+	struct intel_lvds_encoder *lvds_encoder;
 	struct intel_encoder *intel_encoder;
+	struct intel_lvds_connector *lvds_connector;
 	struct intel_connector *intel_connector;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
 	struct drm_display_mode *scan; /* *modes, *bios_mode; */
+	struct drm_display_mode *fixed_mode = NULL;
+	struct edid *edid;
 	struct drm_crtc *crtc;
 	u32 lvds;
 	int pipe;
@@ -945,23 +961,25 @@ bool intel_lvds_init(struct drm_device *dev)
 		}
 	}
 
-	intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
-	if (!intel_lvds) {
+	lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
+	if (!lvds_encoder)
+		return false;
+
+	lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
+	if (!lvds_connector) {
+		kfree(lvds_encoder);
 		return false;
 	}
 
-	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
-	if (!intel_connector) {
-		kfree(intel_lvds);
-		return false;
-	}
+	lvds_encoder->attached_connector = lvds_connector;
 
 	if (!HAS_PCH_SPLIT(dev)) {
-		intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
+		lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
 	}
 
-	intel_encoder = &intel_lvds->base;
+	intel_encoder = &lvds_encoder->base;
 	encoder = &intel_encoder->base;
+	intel_connector = &lvds_connector->base;
 	connector = &intel_connector->base;
 	drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
 			   DRM_MODE_CONNECTOR_LVDS);
@@ -993,14 +1011,10 @@ bool intel_lvds_init(struct drm_device *dev)
 
 	/* create the scaling mode property */
 	drm_mode_create_scaling_mode_property(dev);
-	/*
-	 * the initial panel fitting mode will be FULL_SCREEN.
-	 */
-
-	drm_connector_attach_property(&intel_connector->base,
+	drm_object_attach_property(&connector->base,
 				      dev->mode_config.scaling_mode_property,
 				      DRM_MODE_SCALE_ASPECT);
-	intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
+	intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
 	/*
 	 * LVDS discovery:
 	 * 1) check for EDID on DDC
@@ -1015,20 +1029,21 @@ bool intel_lvds_init(struct drm_device *dev)
 	 * Attempt to get the fixed panel mode from DDC.  Assume that the
 	 * preferred mode is the right one.
 	 */
-	intel_lvds->edid = drm_get_edid(connector,
-					intel_gmbus_get_adapter(dev_priv,
-								pin));
-	if (intel_lvds->edid) {
-		if (drm_add_edid_modes(connector,
-				       intel_lvds->edid)) {
+	edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
+	if (edid) {
+		if (drm_add_edid_modes(connector, edid)) {
 			drm_mode_connector_update_edid_property(connector,
-								intel_lvds->edid);
+								edid);
 		} else {
-			kfree(intel_lvds->edid);
-			intel_lvds->edid = NULL;
+			kfree(edid);
+			edid = ERR_PTR(-EINVAL);
 		}
+	} else {
+		edid = ERR_PTR(-ENOENT);
 	}
-	if (!intel_lvds->edid) {
+	lvds_connector->base.edid = edid;
+
+	if (IS_ERR_OR_NULL(edid)) {
 		/* Didn't get an EDID, so
 		 * set wide sync ranges so we get all modes
 		 * handed to valid_mode for checking
@@ -1041,22 +1056,26 @@ bool intel_lvds_init(struct drm_device *dev)
 
 	list_for_each_entry(scan, &connector->probed_modes, head) {
 		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
-			intel_lvds->fixed_mode =
-				drm_mode_duplicate(dev, scan);
-			intel_find_lvds_downclock(dev,
-						  intel_lvds->fixed_mode,
-						  connector);
-			goto out;
+			DRM_DEBUG_KMS("using preferred mode from EDID: ");
+			drm_mode_debug_printmodeline(scan);
+
+			fixed_mode = drm_mode_duplicate(dev, scan);
+			if (fixed_mode) {
+				intel_find_lvds_downclock(dev, fixed_mode,
+							  connector);
+				goto out;
+			}
 		}
 	}
 
 	/* Failed to get EDID, what about VBT? */
 	if (dev_priv->lfp_lvds_vbt_mode) {
-		intel_lvds->fixed_mode =
-			drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
-		if (intel_lvds->fixed_mode) {
-			intel_lvds->fixed_mode->type |=
-				DRM_MODE_TYPE_PREFERRED;
+		DRM_DEBUG_KMS("using mode from VBT: ");
+		drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
+
+		fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+		if (fixed_mode) {
+			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
 			goto out;
 		}
 	}
@@ -1076,16 +1095,17 @@ bool intel_lvds_init(struct drm_device *dev)
 	crtc = intel_get_crtc_for_pipe(dev, pipe);
 
 	if (crtc && (lvds & LVDS_PORT_EN)) {
-		intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
-		if (intel_lvds->fixed_mode) {
-			intel_lvds->fixed_mode->type |=
-				DRM_MODE_TYPE_PREFERRED;
+		fixed_mode = intel_crtc_mode_get(dev, crtc);
+		if (fixed_mode) {
+			DRM_DEBUG_KMS("using current (BIOS) mode: ");
+			drm_mode_debug_printmodeline(fixed_mode);
+			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
 			goto out;
 		}
 	}
 
 	/* If we still don't have a mode after all that, give up. */
-	if (!intel_lvds->fixed_mode)
+	if (!fixed_mode)
 		goto failed;
 
 out:
@@ -1100,16 +1120,15 @@ out:
 		I915_WRITE(PP_CONTROL,
 			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
 	}
-	dev_priv->lid_notifier.notifier_call = intel_lid_notify;
-	if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
+	lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
+	if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
 		DRM_DEBUG_KMS("lid notifier registration failed\n");
-		dev_priv->lid_notifier.notifier_call = NULL;
+		lvds_connector->lid_notifier.notifier_call = NULL;
 	}
-	/* keep the LVDS connector */
-	dev_priv->int_lvds_connector = connector;
 	drm_sysfs_connector_add(connector);
 
-	intel_panel_setup_backlight(dev);
+	intel_panel_init(&intel_connector->panel, fixed_mode);
+	intel_panel_setup_backlight(connector);
 
 	return true;
 
@@ -1117,7 +1136,9 @@ failed:
 	DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
 	drm_connector_cleanup(connector);
 	drm_encoder_cleanup(encoder);
-	kfree(intel_lvds);
-	kfree(intel_connector);
+	if (fixed_mode)
+		drm_mode_destroy(dev, fixed_mode);
+	kfree(lvds_encoder);
+	kfree(lvds_connector);
 	return false;
 }
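
The LVDS fixed-mode selection now matches the eDP path: prefer the EDID preferred mode, fall back to the VBT mode, and finally (LVDS only) to whatever mode the BIOS left programmed on the pipe. The priority order, condensed into a sketch rather than the driver code:

	#include <stdio.h>

	struct mode { const char *name; };

	static struct mode *pick_fixed_mode(struct mode *edid_preferred,
					    struct mode *vbt_mode,
					    struct mode *bios_mode)
	{
		/* First available source wins; the driver duplicates
		 * whichever mode it settles on. */
		if (edid_preferred)
			return edid_preferred;
		if (vbt_mode)
			return vbt_mode;
		return bios_mode;	/* may be NULL: no panel mode, give up */
	}

	int main(void)
	{
		struct mode vbt = { "1366x768 (VBT)" };
		struct mode *m = pick_fixed_mode(NULL, &vbt, NULL);

		printf("fixed mode: %s\n", m ? m->name : "none, disabling");
		return 0;
	}
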
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index cabd84bf66eb..b00f1c83adce 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -45,7 +45,6 @@ int intel_connector_update_modes(struct drm_connector *connector,
 	drm_mode_connector_update_edid_property(connector, edid);
 	ret = drm_add_edid_modes(connector, edid);
 	drm_edid_to_eld(connector, edid);
-	kfree(edid);
 
 	return ret;
 }
@@ -61,12 +60,16 @@ int intel_ddc_get_modes(struct drm_connector *connector,
 			struct i2c_adapter *adapter)
 {
 	struct edid *edid;
+	int ret;
 
 	edid = drm_get_edid(connector, adapter);
 	if (!edid)
 		return 0;
 
-	return intel_connector_update_modes(connector, edid);
+	ret = intel_connector_update_modes(connector, edid);
+	kfree(edid);
+
+	return ret;
 }
 
 static const struct drm_prop_enum_list force_audio_names[] = {
@@ -94,7 +97,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
 
 		dev_priv->force_audio_property = prop;
 	}
-	drm_connector_attach_property(connector, prop, 0);
+	drm_object_attach_property(&connector->base, prop, 0);
 }
 
 static const struct drm_prop_enum_list broadcast_rgb_names[] = {
@@ -121,5 +124,5 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
 		dev_priv->broadcast_rgb_property = prop;
 	}
 
-	drm_connector_attach_property(connector, prop, 0);
+	drm_object_attach_property(&connector->base, prop, 0);
 }
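
With connectors caching their EDID, intel_connector_update_modes() must no longer free the buffer it is handed; the kfree() moves into intel_ddc_get_modes(), the one caller that owns a throwaway copy. The borrow-versus-own rule in miniature (illustrative only):

	#include <stdlib.h>
	#include <string.h>

	/* Borrows the buffer: may inspect it, must not free it. */
	static int count_nonzero(const unsigned char *edid, size_t len)
	{
		int n = 0;
		size_t i;

		for (i = 0; i < len; i++)
			n += edid[i] != 0;
		return n;
	}

	int main(void)
	{
		/* Owns the buffer: allocated the copy, frees it exactly once. */
		unsigned char *edid = calloc(128, 1);
		int ret;

		if (!edid)
			return 1;
		memset(edid, 0xff, 8);
		ret = count_nonzero(edid, 128);
		free(edid);
		return ret == 8 ? 0 : 1;
	}
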
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 5530413213d8..7741c22c934c 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -154,6 +154,8 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
 	u32 max;
 
+	DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
+
 	if (!(bclp & ASLE_BCLP_VALID))
 		return ASLE_BACKLIGHT_FAILED;
 
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e2aacd329545..bee8cb6108a7 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -130,32 +130,34 @@ static int is_backlight_combination_mode(struct drm_device *dev)
 	return 0;
 }
 
-static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
+static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val;
 
 	/* Restore the CTL value if it was lost, e.g. after a GPU reset */
 
 	if (HAS_PCH_SPLIT(dev_priv->dev)) {
 		val = I915_READ(BLC_PWM_PCH_CTL2);
-		if (dev_priv->saveBLC_PWM_CTL2 == 0) {
-			dev_priv->saveBLC_PWM_CTL2 = val;
+		if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
+			dev_priv->regfile.saveBLC_PWM_CTL2 = val;
 		} else if (val == 0) {
-			I915_WRITE(BLC_PWM_PCH_CTL2,
-				   dev_priv->saveBLC_PWM_CTL2);
-			val = dev_priv->saveBLC_PWM_CTL2;
+			val = dev_priv->regfile.saveBLC_PWM_CTL2;
+			I915_WRITE(BLC_PWM_PCH_CTL2, val);
 		}
 	} else {
 		val = I915_READ(BLC_PWM_CTL);
-		if (dev_priv->saveBLC_PWM_CTL == 0) {
-			dev_priv->saveBLC_PWM_CTL = val;
-			dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+		if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
+			dev_priv->regfile.saveBLC_PWM_CTL = val;
+			if (INTEL_INFO(dev)->gen >= 4)
+				dev_priv->regfile.saveBLC_PWM_CTL2 =
+					I915_READ(BLC_PWM_CTL2);
 		} else if (val == 0) {
-			I915_WRITE(BLC_PWM_CTL,
-				   dev_priv->saveBLC_PWM_CTL);
-			I915_WRITE(BLC_PWM_CTL2,
-				   dev_priv->saveBLC_PWM_CTL2);
-			val = dev_priv->saveBLC_PWM_CTL;
+			val = dev_priv->regfile.saveBLC_PWM_CTL;
+			I915_WRITE(BLC_PWM_CTL, val);
+			if (INTEL_INFO(dev)->gen >= 4)
+				I915_WRITE(BLC_PWM_CTL2,
+					   dev_priv->regfile.saveBLC_PWM_CTL2);
 		}
 	}
 
@@ -164,10 +166,9 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
 
 static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 max;
 
-	max = i915_read_blc_pwm_ctl(dev_priv);
+	max = i915_read_blc_pwm_ctl(dev);
 
 	if (HAS_PCH_SPLIT(dev)) {
 		max >>= 16;
@@ -275,7 +276,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
 	}
 
 	tmp = I915_READ(BLC_PWM_CTL);
-	if (INTEL_INFO(dev)->gen < 4) 
+	if (INTEL_INFO(dev)->gen < 4)
 		level <<= 1;
 	tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
 	I915_WRITE(BLC_PWM_CTL, tmp | level);
@@ -374,26 +375,23 @@ static void intel_panel_init_backlight(struct drm_device *dev)
 enum drm_connector_status
 intel_panel_detect(struct drm_device *dev)
 {
-#if 0
 	struct drm_i915_private *dev_priv = dev->dev_private;
-#endif
 
-	if (i915_panel_ignore_lid)
-		return i915_panel_ignore_lid > 0 ?
-			connector_status_connected :
-			connector_status_disconnected;
-
-	/* opregion lid state on HP 2540p is wrong at boot up,
-	 * appears to be either the BIOS or Linux ACPI fault */
-#if 0
 	/* Assume that the BIOS does not lie through the OpRegion... */
-	if (dev_priv->opregion.lid_state)
+	if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
 		return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
 			connector_status_connected :
 			connector_status_disconnected;
-#endif
+	}
 
-	return connector_status_unknown;
+	switch (i915_panel_ignore_lid) {
+	case -2:
+		return connector_status_connected;
+	case -1:
+		return connector_status_disconnected;
+	default:
+		return connector_status_unknown;
+	}
 }
 
 #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
@@ -416,21 +414,14 @@ static const struct backlight_ops intel_panel_bl_ops = {
 	.get_brightness = intel_panel_get_brightness,
 };
 
-int intel_panel_setup_backlight(struct drm_device *dev)
+int intel_panel_setup_backlight(struct drm_connector *connector)
 {
+	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct backlight_properties props;
-	struct drm_connector *connector;
 
 	intel_panel_init_backlight(dev);
 
-	if (dev_priv->int_lvds_connector)
-		connector = dev_priv->int_lvds_connector;
-	else if (dev_priv->int_edp_connector)
-		connector = dev_priv->int_edp_connector;
-	else
-		return -ENODEV;
-
 	memset(&props, 0, sizeof(props));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = _intel_panel_get_max_backlight(dev);
@@ -460,9 +451,9 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
 		backlight_device_unregister(dev_priv->backlight);
 }
 #else
-int intel_panel_setup_backlight(struct drm_device *dev)
+int intel_panel_setup_backlight(struct drm_connector *connector)
 {
-	intel_panel_init_backlight(dev);
+	intel_panel_init_backlight(connector->dev);
 	return 0;
 }
 
@@ -471,3 +462,20 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
 	return;
 }
 #endif
+
+int intel_panel_init(struct intel_panel *panel,
+		     struct drm_display_mode *fixed_mode)
+{
+	panel->fixed_mode = fixed_mode;
+
+	return 0;
+}
+
+void intel_panel_fini(struct intel_panel *panel)
+{
+	struct intel_connector *intel_connector =
+		container_of(panel, struct intel_connector, panel);
+
+	if (panel->fixed_mode)
+		drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
+}
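
intel_panel_detect() now reads the OpRegion lid state when i915_panel_ignore_lid is 0, and otherwise honours the override (-2 forces connected, -1 forces disconnected, anything else reports unknown, per the switch above). A standalone sketch of that decision order, with has_lid_state/lid_open standing in for the OpRegion read:

	#include <stdio.h>

	enum status { CONNECTED, DISCONNECTED, UNKNOWN };

	static enum status panel_detect(int ignore_lid, int has_lid_state,
					int lid_open)
	{
		/* Default: believe the firmware's lid status if it has one. */
		if (!ignore_lid && has_lid_state)
			return lid_open ? CONNECTED : DISCONNECTED;

		/* Otherwise the module parameter decides. */
		switch (ignore_lid) {
		case -2:
			return CONNECTED;
		case -1:
			return DISCONNECTED;
		default:
			return UNKNOWN;
		}
	}

	int main(void)
	{
		printf("%d\n", panel_detect(0, 1, 1));	/* firmware: lid open */
		printf("%d\n", panel_detect(-1, 1, 1));	/* forced disconnected */
		printf("%d\n", panel_detect(1, 1, 1));	/* lid checks disabled */
		return 0;
	}
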
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 442968f8b201..496caa73eb70 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1325,10 +1325,11 @@ static void valleyview_update_wm(struct drm_device *dev)
 		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
 		   planea_wm);
 	I915_WRITE(DSPFW2,
-		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
 		   (cursora_wm << DSPFW_CURSORA_SHIFT));
 	I915_WRITE(DSPFW3,
-		   (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
+		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
+		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
 static void g4x_update_wm(struct drm_device *dev)
@@ -1374,11 +1375,11 @@ static void g4x_update_wm(struct drm_device *dev)
 		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
 		   planea_wm);
 	I915_WRITE(DSPFW2,
-		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
 		   (cursora_wm << DSPFW_CURSORA_SHIFT));
 	/* HPLL off in SR has some issues on G4x... disable it */
 	I915_WRITE(DSPFW3,
-		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
+		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
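
The two DSPFW hunks above fix the same read-modify-write slip: masking with reg & MASK keeps only the old field instead of clearing it, so stale bits leak into the OR. The correct idiom as a standalone check (field layout illustrative, not the real DSPFW bit assignment):

	#include <assert.h>
	#include <stdint.h>

	#define CURSOR_SR_SHIFT	24
	#define CURSOR_SR_MASK	(0x3fu << CURSOR_SR_SHIFT)

	static uint32_t set_cursor_sr(uint32_t reg, uint32_t wm)
	{
		/* Clear the field with ~MASK before inserting the new value.
		 * Masking with CURSOR_SR_MASK instead (the bug fixed above)
		 * would keep the stale field and drop every other bit. */
		return (reg & ~CURSOR_SR_MASK) | (wm << CURSOR_SR_SHIFT);
	}

	int main(void)
	{
		uint32_t reg = (0x15u << CURSOR_SR_SHIFT) | 0x00ff00ffu;

		reg = set_cursor_sr(reg, 0x08);
		assert(((reg & CURSOR_SR_MASK) >> CURSOR_SR_SHIFT) == 0x08);
		assert((reg & ~CURSOR_SR_MASK) == 0x00ff00ffu);	/* untouched */
		return 0;
	}
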
 
@@ -1468,9 +1469,12 @@ static void i9xx_update_wm(struct drm_device *dev)
 	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
 	crtc = intel_get_crtc_for_plane(dev, 0);
 	if (crtc->enabled && crtc->fb) {
+		int cpp = crtc->fb->bits_per_pixel / 8;
+		if (IS_GEN2(dev))
+			cpp = 4;
+
 		planea_wm = intel_calculate_wm(crtc->mode.clock,
-					       wm_info, fifo_size,
-					       crtc->fb->bits_per_pixel / 8,
+					       wm_info, fifo_size, cpp,
 					       latency_ns);
 		enabled = crtc;
 	} else
@@ -1479,9 +1483,12 @@ static void i9xx_update_wm(struct drm_device *dev)
 	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
 	crtc = intel_get_crtc_for_plane(dev, 1);
 	if (crtc->enabled && crtc->fb) {
+		int cpp = crtc->fb->bits_per_pixel / 8;
+		if (IS_GEN2(dev))
+			cpp = 4;
+
 		planeb_wm = intel_calculate_wm(crtc->mode.clock,
-					       wm_info, fifo_size,
-					       crtc->fb->bits_per_pixel / 8,
+					       wm_info, fifo_size, cpp,
 					       latency_ns);
 		if (enabled == NULL)
 			enabled = crtc;
@@ -1571,8 +1578,7 @@ static void i830_update_wm(struct drm_device *dev)
 
 	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
 				       dev_priv->display.get_fifo_size(dev, 0),
-				       crtc->fb->bits_per_pixel / 8,
-				       latency_ns);
+				       4, latency_ns);
 	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
 	fwater_lo |= (3<<8) | planea_wm;
 
@@ -2323,7 +2329,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 limits = gen6_rps_limits(dev_priv, &val);
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 	WARN_ON(val > dev_priv->rps.max_delay);
 	WARN_ON(val < dev_priv->rps.min_delay);
 
@@ -2398,12 +2404,12 @@ static void gen6_enable_rps(struct drm_device *dev)
 	struct intel_ring_buffer *ring;
 	u32 rp_state_cap;
 	u32 gt_perf_status;
-	u32 pcu_mbox, rc6_mask = 0;
+	u32 rc6vids, pcu_mbox, rc6_mask = 0;
 	u32 gtfifodbg;
 	int rc6_mode;
-	int i;
+	int i, ret;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
 	/* Here begins a magic sequence of register writes to enable
 	 * auto-downclocking.
@@ -2497,30 +2503,16 @@ static void gen6_enable_rps(struct drm_device *dev)
 		   GEN6_RP_UP_BUSY_AVG |
 		   (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
 
-	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
-		     500))
-		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
-
-	I915_WRITE(GEN6_PCODE_DATA, 0);
-	I915_WRITE(GEN6_PCODE_MAILBOX,
-		   GEN6_PCODE_READY |
-		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
-	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
-		     500))
-		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
-
-	/* Check for overclock support */
-	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
-		     500))
-		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
-	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
-	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
-	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
-		     500))
-		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
-	if (pcu_mbox & (1<<31)) { /* OC supported */
-		dev_priv->rps.max_delay = pcu_mbox & 0xff;
-		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
+	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
+	if (!ret) {
+		pcu_mbox = 0;
+		ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
+		if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
+			dev_priv->rps.max_delay = pcu_mbox & 0xff;
+			DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
+		}
+	} else {
+		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
 	}
 
 	gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
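
sandybridge_pcode_read()/sandybridge_pcode_write() fold the repeated open-coded mailbox handshake (wait for READY to clear, load GEN6_PCODE_DATA, ring GEN6_PCODE_MAILBOX, wait again) into one helper pair. A condensed sketch of that protocol against fake registers; the real helpers poll hardware with a timeout where pcode_service() stands in here:

	#include <stdint.h>
	#include <stdio.h>

	#define PCODE_READY	(1u << 31)

	/* Fake mailbox registers; the real ones are MMIO. */
	static uint32_t mbox, data;

	/* Fake firmware: completes a pending request by clearing READY. */
	static void pcode_service(void)
	{
		if (mbox & PCODE_READY) {
			data = 0x800000aa;	/* pretend reply */
			mbox &= ~PCODE_READY;
		}
	}

	static int pcode_rw(uint32_t cmd, uint32_t *val)
	{
		if (mbox & PCODE_READY)
			return -1;	/* mailbox already busy */

		data = *val;
		mbox = PCODE_READY | cmd;

		pcode_service();
		if (mbox & PCODE_READY)
			return -1;	/* firmware never answered */

		*val = data;
		return 0;
	}

	int main(void)
	{
		uint32_t v = 0;

		if (pcode_rw(0x8 /* hypothetical command */, &v) == 0)
			printf("reply: %#x\n", v);
		return 0;
	}
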
@@ -2534,6 +2526,20 @@ static void gen6_enable_rps(struct drm_device *dev)
 	/* enable all PM interrupts */
 	I915_WRITE(GEN6_PMINTRMSK, 0);
 
+	rc6vids = 0;
+	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+	if (IS_GEN6(dev) && ret) {
+		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+	} else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+			  GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+		rc6vids &= 0xffff00;
+		rc6vids |= GEN6_ENCODE_RC6_VID(450);
+		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+		if (ret)
+			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+	}
+
 	gen6_gt_force_wake_put(dev_priv);
 }
 
@@ -2541,10 +2547,11 @@ static void gen6_update_ring_freq(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int min_freq = 15;
-	int gpu_freq, ia_freq, max_ia_freq;
+	int gpu_freq;
+	unsigned int ia_freq, max_ia_freq;
 	int scaling_factor = 180;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
 	max_ia_freq = cpufreq_quick_get_max(0);
 	/*
@@ -2575,17 +2582,11 @@ static void gen6_update_ring_freq(struct drm_device *dev)
 		else
 			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
 		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+		ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;
 
-		I915_WRITE(GEN6_PCODE_DATA,
-			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
-			   gpu_freq);
-		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
-			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
-		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
-			      GEN6_PCODE_READY) == 0, 10)) {
-			DRM_ERROR("pcode write of freq table timed out\n");
-			continue;
-		}
+		sandybridge_pcode_write(dev_priv,
+					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+					ia_freq | gpu_freq);
 	}
 }
 
@@ -2593,16 +2594,16 @@ void ironlake_teardown_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->renderctx) {
-		i915_gem_object_unpin(dev_priv->renderctx);
-		drm_gem_object_unreference(&dev_priv->renderctx->base);
-		dev_priv->renderctx = NULL;
+	if (dev_priv->ips.renderctx) {
+		i915_gem_object_unpin(dev_priv->ips.renderctx);
+		drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
+		dev_priv->ips.renderctx = NULL;
 	}
 
-	if (dev_priv->pwrctx) {
-		i915_gem_object_unpin(dev_priv->pwrctx);
-		drm_gem_object_unreference(&dev_priv->pwrctx->base);
-		dev_priv->pwrctx = NULL;
+	if (dev_priv->ips.pwrctx) {
+		i915_gem_object_unpin(dev_priv->ips.pwrctx);
+		drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
+		dev_priv->ips.pwrctx = NULL;
 	}
 }
 
@@ -2628,14 +2629,14 @@ static int ironlake_setup_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->renderctx == NULL)
-		dev_priv->renderctx = intel_alloc_context_page(dev);
-	if (!dev_priv->renderctx)
+	if (dev_priv->ips.renderctx == NULL)
+		dev_priv->ips.renderctx = intel_alloc_context_page(dev);
+	if (!dev_priv->ips.renderctx)
 		return -ENOMEM;
 
-	if (dev_priv->pwrctx == NULL)
-		dev_priv->pwrctx = intel_alloc_context_page(dev);
-	if (!dev_priv->pwrctx) {
+	if (dev_priv->ips.pwrctx == NULL)
+		dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
+	if (!dev_priv->ips.pwrctx) {
 		ironlake_teardown_rc6(dev);
 		return -ENOMEM;
 	}
@@ -2647,6 +2648,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	bool was_interruptible;
 	int ret;
 
 	/* rc6 disabled by default due to repeated reports of hanging during
@@ -2661,6 +2663,9 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 	if (ret)
 		return;
 
+	was_interruptible = dev_priv->mm.interruptible;
+	dev_priv->mm.interruptible = false;
+
 	/*
 	 * GPU can automatically power down the render unit if given a page
 	 * to save state.
@@ -2668,12 +2673,13 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 	ret = intel_ring_begin(ring, 6);
 	if (ret) {
 		ironlake_teardown_rc6(dev);
+		dev_priv->mm.interruptible = was_interruptible;
 		return;
 	}
 
 	intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
+	intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
 			MI_MM_SPACE_GTT |
 			MI_SAVE_EXT_STATE_EN |
 			MI_RESTORE_EXT_STATE_EN |
@@ -2688,14 +2694,15 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 	 * does an implicit flush, combined with MI_FLUSH above, it should be
 	 * safe to assume that renderctx is valid
 	 */
-	ret = intel_wait_ring_idle(ring);
+	ret = intel_ring_idle(ring);
+	dev_priv->mm.interruptible = was_interruptible;
 	if (ret) {
 		DRM_ERROR("failed to enable ironlake power power savings\n");
 		ironlake_teardown_rc6(dev);
 		return;
 	}
 
-	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
+	I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
 	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 }
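
The save/restore of mm.interruptible added above is a recurring i915 pattern; a minimal sketch of it in isolation (illustrative only, not part of this patch):

	bool was_interruptible = dev_priv->mm.interruptible;
	int ret;

	/* The GPU wait below must not be aborted by a signal, so switch
	 * to uninterruptible mode for its duration and restore the saved
	 * value on every exit path, including errors. */
	dev_priv->mm.interruptible = false;

	ret = intel_ring_idle(ring);
	dev_priv->mm.interruptible = was_interruptible;
	if (ret)
		return ret;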
 
@@ -3304,37 +3311,72 @@ static void intel_init_emon(struct drm_device *dev)
 
 void intel_disable_gt_powersave(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	if (IS_IRONLAKE_M(dev)) {
 		ironlake_disable_drps(dev);
 		ironlake_disable_rc6(dev);
 	} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
+		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+		mutex_lock(&dev_priv->rps.hw_lock);
 		gen6_disable_rps(dev);
+		mutex_unlock(&dev_priv->rps.hw_lock);
 	}
 }
 
+static void intel_gen6_powersave_work(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private,
+			     rps.delayed_resume_work.work);
+	struct drm_device *dev = dev_priv->dev;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	gen6_enable_rps(dev);
+	gen6_update_ring_freq(dev);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
 void intel_enable_gt_powersave(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	if (IS_IRONLAKE_M(dev)) {
 		ironlake_enable_drps(dev);
 		ironlake_enable_rc6(dev);
 		intel_init_emon(dev);
 	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
-		gen6_enable_rps(dev);
-		gen6_update_ring_freq(dev);
+		/*
+		 * PCU communication is slow and this doesn't need to be
+		 * done at any specific time, so do this out of our fast path
+		 * to make resume and init faster.
+		 */
+		schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
+				      round_jiffies_up_relative(HZ));
 	}
 }
 
+static void ibx_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/*
+	 * On Ibex Peak and Cougar Point, we need to disable clock
+	 * gating for the panel power sequencer or it will fail to
+	 * start up when no ports are active.
+	 */
+	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
+
 static void ironlake_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
 	/* Required for FBC */
-	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
-		DPFCRUNIT_CLOCK_GATE_DISABLE |
-		DPFDUNIT_CLOCK_GATE_DISABLE;
-	/* Required for CxSR */
-	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
+	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
+		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
+		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
 
 	I915_WRITE(PCH_3DCGDIS0,
 		   MARIUNIT_CLOCK_GATE_DISABLE |
@@ -3342,8 +3384,6 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(PCH_3DCGDIS1,
 		   VFMUNIT_CLOCK_GATE_DISABLE);
 
-	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
 	/*
 	 * According to the spec the following bits should be set in
 	 * order to enable memory self-refresh
@@ -3354,9 +3394,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
 		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
 		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
-	I915_WRITE(ILK_DSPCLK_GATE,
-		   (I915_READ(ILK_DSPCLK_GATE) |
-		    ILK_DPARB_CLK_GATE));
+	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
 	I915_WRITE(DISP_ARB_CTL,
 		   (I915_READ(DISP_ARB_CTL) |
 		    DISP_FBC_WM_DIS));
@@ -3378,28 +3416,56 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
 		I915_WRITE(ILK_DISPLAY_CHICKEN2,
 			   I915_READ(ILK_DISPLAY_CHICKEN2) |
 			   ILK_DPARB_GATE);
-		I915_WRITE(ILK_DSPCLK_GATE,
-			   I915_READ(ILK_DSPCLK_GATE) |
-			   ILK_DPFC_DIS1 |
-			   ILK_DPFC_DIS2 |
-			   ILK_CLK_FBC);
 	}
 
+	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+
 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
 		   ILK_ELPIN_409_SELECT);
 	I915_WRITE(_3D_CHICKEN2,
 		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
 		   _3D_CHICKEN2_WM_READ_PIPELINED);
+
+	/* WaDisableRenderCachePipelinedFlush */
+	I915_WRITE(CACHE_MODE_0,
+		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
+
+	ibx_init_clock_gating(dev);
+}
+
+static void cpt_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+
+	/*
+	 * On Ibex Peak and Cougar Point, we need to disable clock
+	 * gating for the panel power sequencer or it will fail to
+	 * start up when no ports are active.
+	 */
+	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+		   DPLS_EDP_PPS_FIX_DIS);
+	/* The below fixes the weird display corruption (a few pixels
+	 * shifted downward) seen only on the LVDS outputs of some HP
+	 * laptops with Ivy Bridge.
+	 */
+	for_each_pipe(pipe)
+		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
+	/* WADP0ClockGatingDisable */
+	for_each_pipe(pipe) {
+		I915_WRITE(TRANS_CHICKEN1(pipe),
+			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
+	}
 }
 
 static void gen6_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
-	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
-	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
 
 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
@@ -3454,11 +3520,12 @@ static void gen6_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
 		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
-	I915_WRITE(ILK_DSPCLK_GATE,
-		   I915_READ(ILK_DSPCLK_GATE) |
-		   ILK_DPARB_CLK_GATE  |
-		   ILK_DPFD_CLK_GATE);
+	I915_WRITE(ILK_DSPCLK_GATE_D,
+		   I915_READ(ILK_DSPCLK_GATE_D) |
+		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
+		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
 
+	/* WaMbcDriverBootEnable */
 	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
 		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
@@ -3473,6 +3540,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
 	 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
 	I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
 	I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
+
+	cpt_init_clock_gating(dev);
 }
 
 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -3487,13 +3556,24 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
 }
 
+static void lpt_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/*
+	 * TODO: this bit should only be enabled when really needed, then
+	 * disabled when not needed anymore in order to save power.
+	 */
+	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+		I915_WRITE(SOUTH_DSPCLK_GATE_D,
+			   I915_READ(SOUTH_DSPCLK_GATE_D) |
+			   PCH_LP_PARTITION_LEVEL_DISABLE);
+}
+
 static void haswell_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
-	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
-	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 
 	I915_WRITE(WM3_LP_ILK, 0);
 	I915_WRITE(WM2_LP_ILK, 0);
@@ -3504,12 +3584,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
 	 */
 	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
 
-	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
-
-	I915_WRITE(IVB_CHICKEN3,
-		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
-		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
-
 	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
 	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
 		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -3538,6 +3612,10 @@ static void haswell_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(CACHE_MODE_1,
 		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
+	/* WaMbcDriverBootEnable */
+	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
 	/* XXX: This is a workaround for early silicon revisions and should be
 	 * removed later.
 	 */
@@ -3547,27 +3625,38 @@ static void haswell_init_clock_gating(struct drm_device *dev)
 			WM_DBG_DISALLOW_SPRITE |
 			WM_DBG_DISALLOW_MAXFIFO);
 
+	lpt_init_clock_gating(dev);
 }
 
 static void ivybridge_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
-	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
 	uint32_t snpcr;
 
-	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
 	I915_WRITE(WM3_LP_ILK, 0);
 	I915_WRITE(WM2_LP_ILK, 0);
 	I915_WRITE(WM1_LP_ILK, 0);
 
-	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
 
+	/* WaDisableEarlyCull */
+	I915_WRITE(_3D_CHICKEN3,
+		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+
+	/* WaDisableBackToBackFlipFix */
 	I915_WRITE(IVB_CHICKEN3,
 		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
 		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
+	/* WaDisablePSDDualDispatchEnable */
+	if (IS_IVB_GT1(dev))
+		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+	else
+		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
+			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
 	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
 	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
 		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -3576,7 +3665,18 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(GEN7_L3CNTLREG1,
 			GEN7_WA_FOR_GEN7_L3_CONTROL);
 	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
-			GEN7_WA_L3_CHICKEN_MODE);
+		   GEN7_WA_L3_CHICKEN_MODE);
+	if (IS_IVB_GT1(dev))
+		I915_WRITE(GEN7_ROW_CHICKEN2,
+			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+	else
+		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
+			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
+	/* WaForceL3Serialization */
+	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
 
 	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
 	 * gating disable must be set.  Failure to set it results in
@@ -3607,6 +3707,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 		intel_flush_display_plane(dev_priv, pipe);
 	}
 
+	/* WaMbcDriverBootEnable */
 	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
 		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
@@ -3620,39 +3721,59 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 	snpcr &= ~GEN6_MBC_SNPCR_MASK;
 	snpcr |= GEN6_MBC_SNPCR_MED;
 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+
+	cpt_init_clock_gating(dev);
 }
 
 static void valleyview_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
-	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
-	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 
 	I915_WRITE(WM3_LP_ILK, 0);
 	I915_WRITE(WM2_LP_ILK, 0);
 	I915_WRITE(WM1_LP_ILK, 0);
 
-	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
 
+	/* WaDisableEarlyCull */
+	I915_WRITE(_3D_CHICKEN3,
+		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+
+	/* WaDisableBackToBackFlipFix */
 	I915_WRITE(IVB_CHICKEN3,
 		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
 		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
+	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+		   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
 	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
 	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
 		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
 
 	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
-	I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
+	I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
 	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
 
+	/* WaForceL3Serialization */
+	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+	/* WaDisableDopClockGating */
+	I915_WRITE(GEN7_ROW_CHICKEN2,
+		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
 	/* This is required by WaCatErrorRejectionIssue */
 	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
 		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
 		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
+	/* WaMbcDriverBootEnable */
 	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
 		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
@@ -3704,6 +3825,13 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 		   PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
 		   SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
 		   PLANEA_FLIPDONE_INT_EN);
+
+	/*
+	 * WaDisableVLVClockGating_VBIIssue
+	 * Disable clock gating on the GCFG unit to prevent a delay
+	 * in the reporting of vblank events.
+	 */
+	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
 }
 
 static void g4x_init_clock_gating(struct drm_device *dev)
@@ -3722,6 +3850,10 @@ static void g4x_init_clock_gating(struct drm_device *dev)
 	if (IS_GM45(dev))
 		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
 	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+
+	/* WaDisableRenderCachePipelinedFlush */
+	I915_WRITE(CACHE_MODE_0,
+		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
 }
 
 static void crestline_init_clock_gating(struct drm_device *dev)
@@ -3777,44 +3909,11 @@ static void i830_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
 }
 
-static void ibx_init_clock_gating(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	/*
-	 * On Ibex Peak and Cougar Point, we need to disable clock
-	 * gating for the panel power sequencer or it will fail to
-	 * start up when no ports are active.
-	 */
-	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void cpt_init_clock_gating(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipe;
-
-	/*
-	 * On Ibex Peak and Cougar Point, we need to disable clock
-	 * gating for the panel power sequencer or it will fail to
-	 * start up when no ports are active.
-	 */
-	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
-		   DPLS_EDP_PPS_FIX_DIS);
-	/* Without this, mode sets may fail silently on FDI */
-	for_each_pipe(pipe)
-		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
-}
-
 void intel_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	dev_priv->display.init_clock_gating(dev);
-
-	if (dev_priv->display.init_pch_clock_gating)
-		dev_priv->display.init_pch_clock_gating(dev);
 }
 
 /* Starting with Haswell, we have different power wells for
@@ -3840,7 +3939,7 @@ void intel_init_power_wells(struct drm_device *dev)
 
 		if ((well & HSW_PWR_WELL_STATE) == 0) {
 			I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
-			if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20))
+			if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
 				DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
 		}
 	}
@@ -3878,11 +3977,6 @@ void intel_init_pm(struct drm_device *dev)
 
 	/* For FIFO watermark updates */
 	if (HAS_PCH_SPLIT(dev)) {
-		if (HAS_PCH_IBX(dev))
-			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
-		else if (HAS_PCH_CPT(dev))
-			dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
-
 		if (IS_GEN5(dev)) {
 			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
 				dev_priv->display.update_wm = ironlake_update_wm;
@@ -3993,6 +4087,12 @@ static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
 		DRM_ERROR("GT thread status wait timed out\n");
 }
 
+static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE_NOTRACE(FORCEWAKE, 0);
+	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+}
+
 static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
 	u32 forcewake_ack;
@@ -4006,7 +4106,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 			    FORCEWAKE_ACK_TIMEOUT_MS))
 		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
-	I915_WRITE_NOTRACE(FORCEWAKE, 1);
+	I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
 	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
 
 	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
@@ -4016,6 +4116,12 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 	__gen6_gt_wait_for_thread_c0(dev_priv);
 }
 
+static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
+	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+}
+
 static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
 {
 	u32 forcewake_ack;
@@ -4029,7 +4135,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
 			    FORCEWAKE_ACK_TIMEOUT_MS))
 		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
-	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
+	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
 
 	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
@@ -4073,7 +4179,7 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 
 static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
+	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
 	/* gen6_gt_check_fifodbg doubles as the POSTING_READ */
 	gen6_gt_check_fifodbg(dev_priv);
 }
@@ -4111,13 +4217,18 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 	return ret;
 }
 
+static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
+}
+
 static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
 {
 	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
 			    FORCEWAKE_ACK_TIMEOUT_MS))
 		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
-	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1));
+	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 
 	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
 			    FORCEWAKE_ACK_TIMEOUT_MS))
@@ -4128,49 +4239,89 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
 
 static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1));
+	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
 	/* The below doubles as a POSTING_READ */
 	gen6_gt_check_fifodbg(dev_priv);
 }
 
+void intel_gt_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_VALLEYVIEW(dev)) {
+		vlv_force_wake_reset(dev_priv);
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		__gen6_gt_force_wake_reset(dev_priv);
+		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+			__gen6_gt_force_wake_mt_reset(dev_priv);
+	}
+}
+
 void intel_gt_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	spin_lock_init(&dev_priv->gt_lock);
 
+	intel_gt_reset(dev);
+
 	if (IS_VALLEYVIEW(dev)) {
 		dev_priv->gt.force_wake_get = vlv_force_wake_get;
 		dev_priv->gt.force_wake_put = vlv_force_wake_put;
-	} else if (INTEL_INFO(dev)->gen >= 6) {
+	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
+		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
+	} else if (IS_GEN6(dev)) {
 		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
 		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
-
-		/* IVB configs may use multi-threaded forcewake */
-		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
-			u32 ecobus;
-
-			/* A small trick here - if the bios hasn't configured
-			 * MT forcewake, and if the device is in RC6, then
-			 * force_wake_mt_get will not wake the device and the
-			 * ECOBUS read will return zero. Which will be
-			 * (correctly) interpreted by the test below as MT
-			 * forcewake being disabled.
-			 */
-			mutex_lock(&dev->struct_mutex);
-			__gen6_gt_force_wake_mt_get(dev_priv);
-			ecobus = I915_READ_NOTRACE(ECOBUS);
-			__gen6_gt_force_wake_mt_put(dev_priv);
-			mutex_unlock(&dev->struct_mutex);
-
-			if (ecobus & FORCEWAKE_MT_ENABLE) {
-				DRM_DEBUG_KMS("Using MT version of forcewake\n");
-				dev_priv->gt.force_wake_get =
-					__gen6_gt_force_wake_mt_get;
-				dev_priv->gt.force_wake_put =
-					__gen6_gt_force_wake_mt_put;
-			}
-		}
 	}
+	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
+			  intel_gen6_powersave_work);
 }
 
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
+		return -EAGAIN;
+	}
+
+	I915_WRITE(GEN6_PCODE_DATA, *val);
+	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500)) {
+		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
+		return -ETIMEDOUT;
+	}
+
+	*val = I915_READ(GEN6_PCODE_DATA);
+	I915_WRITE(GEN6_PCODE_DATA, 0);
+
+	return 0;
+}
+
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
+		return -EAGAIN;
+	}
+
+	I915_WRITE(GEN6_PCODE_DATA, val);
+	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500)) {
+		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
+		return -ETIMEDOUT;
+	}
+
+	I915_WRITE(GEN6_PCODE_DATA, 0);
+
+	return 0;
+}
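
For reference, a sketch of how callers are meant to drive the new pcode helpers; rps.hw_lock must be held (the WARN_ONs above enforce it) and both failure modes need handling (illustrative only):

	u32 pcu_mbox = 0;
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
	mutex_unlock(&dev_priv->rps.hw_lock);

	/* -EAGAIN: mailbox still busy, -ETIMEDOUT: pcode never answered */
	if (ret)
		DRM_DEBUG_DRIVER("pcode read failed: %d\n", ret);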
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ecbc5c5dbbbc..2346b920bd86 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -45,7 +45,7 @@ struct pipe_control {
 
 static inline int ring_space(struct intel_ring_buffer *ring)
 {
-	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
 	if (space < 0)
 		space += ring->size;
 	return space;
@@ -245,7 +245,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
 		/*
 		 * TLB invalidate requires a post-sync write.
 		 */
-		flags |= PIPE_CONTROL_QW_WRITE;
+		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
 	}
 
 	ret = intel_ring_begin(ring, 4);
@@ -555,15 +555,11 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
 
 static void
 update_mboxes(struct intel_ring_buffer *ring,
-	    u32 seqno,
-	    u32 mmio_offset)
+	      u32 mmio_offset)
 {
-	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
-			      MI_SEMAPHORE_GLOBAL_GTT |
-			      MI_SEMAPHORE_REGISTER |
-			      MI_SEMAPHORE_UPDATE);
-	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 	intel_ring_emit(ring, mmio_offset);
+	intel_ring_emit(ring, ring->outstanding_lazy_request);
 }
 
 /**
@@ -576,8 +572,7 @@ update_mboxes(struct intel_ring_buffer *ring,
  * This acts like a signal in the canonical semaphore.
  */
 static int
-gen6_add_request(struct intel_ring_buffer *ring,
-		 u32 *seqno)
+gen6_add_request(struct intel_ring_buffer *ring)
 {
 	u32 mbox1_reg;
 	u32 mbox2_reg;
@@ -590,13 +585,11 @@ gen6_add_request(struct intel_ring_buffer *ring,
 	mbox1_reg = ring->signal_mbox[0];
 	mbox2_reg = ring->signal_mbox[1];
 
-	*seqno = i915_gem_next_request_seqno(ring);
-
-	update_mboxes(ring, *seqno, mbox1_reg);
-	update_mboxes(ring, *seqno, mbox2_reg);
+	update_mboxes(ring, mbox1_reg);
+	update_mboxes(ring, mbox2_reg);
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, *seqno);
+	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_advance(ring);
 
@@ -653,10 +646,8 @@ do {									\
 } while (0)
 
 static int
-pc_render_add_request(struct intel_ring_buffer *ring,
-		      u32 *result)
+pc_render_add_request(struct intel_ring_buffer *ring)
 {
-	u32 seqno = i915_gem_next_request_seqno(ring);
 	struct pipe_control *pc = ring->private;
 	u32 scratch_addr = pc->gtt_offset + 128;
 	int ret;
@@ -677,7 +668,7 @@ pc_render_add_request(struct intel_ring_buffer *ring,
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
 	scratch_addr += 128; /* write to separate cachelines */
@@ -696,11 +687,10 @@ pc_render_add_request(struct intel_ring_buffer *ring,
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
 	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
 
-	*result = seqno;
 	return 0;
 }
 
@@ -888,25 +878,20 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
 }
 
 static int
-i9xx_add_request(struct intel_ring_buffer *ring,
-		 u32 *result)
+i9xx_add_request(struct intel_ring_buffer *ring)
 {
-	u32 seqno;
 	int ret;
 
 	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
-	seqno = i915_gem_next_request_seqno(ring);
-
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_advance(ring);
 
-	*result = seqno;
 	return 0;
 }
 
@@ -964,7 +949,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 }
 
 static int
-i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			 u32 offset, u32 length,
+			 unsigned flags)
 {
 	int ret;
 
@@ -975,7 +962,7 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			MI_BATCH_GTT |
-			MI_BATCH_NON_SECURE_I965);
+			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
 	intel_ring_emit(ring, offset);
 	intel_ring_advance(ring);
 
@@ -984,7 +971,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 
 static int
 i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
-				u32 offset, u32 len)
+				u32 offset, u32 len,
+				unsigned flags)
 {
 	int ret;
 
@@ -993,7 +981,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 		return ret;
 
 	intel_ring_emit(ring, MI_BATCH_BUFFER);
-	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
 	intel_ring_emit(ring, offset + len - 8);
 	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
@@ -1003,7 +991,8 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 
 static int
 i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
-				u32 offset, u32 len)
+			 u32 offset, u32 len,
+			 unsigned flags)
 {
 	int ret;
 
@@ -1012,7 +1001,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
 		return ret;
 
 	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
 	intel_ring_advance(ring);
 
 	return 0;
@@ -1075,6 +1064,29 @@ err:
 	return ret;
 }
 
+static int init_phys_hws_pga(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	u32 addr;
+
+	if (!dev_priv->status_page_dmah) {
+		dev_priv->status_page_dmah =
+			drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
+		if (!dev_priv->status_page_dmah)
+			return -ENOMEM;
+	}
+
+	addr = dev_priv->status_page_dmah->busaddr;
+	if (INTEL_INFO(ring->dev)->gen >= 4)
+		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+	I915_WRITE(HWS_PGA, addr);
+
+	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+	return 0;
+}
+
 static int intel_init_ring_buffer(struct drm_device *dev,
 				  struct intel_ring_buffer *ring)
 {
@@ -1086,6 +1098,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 	ring->size = 32 * PAGE_SIZE;
+	memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
 
 	init_waitqueue_head(&ring->irq_queue);
 
@@ -1093,6 +1106,11 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		ret = init_status_page(ring);
 		if (ret)
 			return ret;
+	} else {
+		BUG_ON(ring->id != RCS);
+		ret = init_phys_hws_pga(ring);
+		if (ret)
+			return ret;
 	}
 
 	obj = i915_gem_alloc_object(dev, ring->size);
@@ -1157,7 +1175,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
 	/* Disable the ring buffer. The ring must be idle at this point */
 	dev_priv = ring->dev->dev_private;
-	ret = intel_wait_ring_idle(ring);
+	ret = intel_ring_idle(ring);
 	if (ret)
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
 			  ring->name, ret);
@@ -1176,28 +1194,6 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 	cleanup_status_page(ring);
 }
 
-static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
-{
-	uint32_t __iomem *virt;
-	int rem = ring->size - ring->tail;
-
-	if (ring->space < rem) {
-		int ret = intel_wait_ring_buffer(ring, rem);
-		if (ret)
-			return ret;
-	}
-
-	virt = ring->virtual_start + ring->tail;
-	rem /= 4;
-	while (rem--)
-		iowrite32(MI_NOOP, virt++);
-
-	ring->tail = 0;
-	ring->space = ring_space(ring);
-
-	return 0;
-}
-
 static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
 	int ret;
@@ -1231,7 +1227,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
 		if (request->tail == -1)
 			continue;
 
-		space = request->tail - (ring->tail + 8);
+		space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
 		if (space < 0)
 			space += ring->size;
 		if (space >= n) {
@@ -1266,7 +1262,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
 	return 0;
 }
 
-int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
+static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1309,6 +1305,60 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 	return -EBUSY;
 }
 
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+{
+	uint32_t __iomem *virt;
+	int rem = ring->size - ring->tail;
+
+	if (ring->space < rem) {
+		int ret = ring_wait_for_space(ring, rem);
+		if (ret)
+			return ret;
+	}
+
+	virt = ring->virtual_start + ring->tail;
+	rem /= 4;
+	while (rem--)
+		iowrite32(MI_NOOP, virt++);
+
+	ring->tail = 0;
+	ring->space = ring_space(ring);
+
+	return 0;
+}
+
+int intel_ring_idle(struct intel_ring_buffer *ring)
+{
+	u32 seqno;
+	int ret;
+
+	/* We need to add any requests required to flush the objects and ring */
+	if (ring->outstanding_lazy_request) {
+		ret = i915_add_request(ring, NULL, NULL);
+		if (ret)
+			return ret;
+	}
+
+	/* Wait upon the last request to be completed */
+	if (list_empty(&ring->request_list))
+		return 0;
+
+	seqno = list_entry(ring->request_list.prev,
+			   struct drm_i915_gem_request,
+			   list)->seqno;
+
+	return i915_wait_seqno(ring, seqno);
+}
+
+static int
+intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+{
+	if (ring->outstanding_lazy_request)
+		return 0;
+
+	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+}
+
 int intel_ring_begin(struct intel_ring_buffer *ring,
 		     int num_dwords)
 {
@@ -1320,6 +1370,11 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 	if (ret)
 		return ret;
 
+	/* Preallocate the outstanding lazy request ("olr") before touching the ring */
+	ret = intel_ring_alloc_seqno(ring);
+	if (ret)
+		return ret;
+
 	if (unlikely(ring->tail + n > ring->effective_size)) {
 		ret = intel_wrap_ring_buffer(ring);
 		if (unlikely(ret))
@@ -1327,7 +1382,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 	}
 
 	if (unlikely(ring->space < n)) {
-		ret = intel_wait_ring_buffer(ring, n);
+		ret = ring_wait_for_space(ring, n);
 		if (unlikely(ret))
 			return ret;
 	}
@@ -1391,10 +1446,17 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
+	/*
+	 * Bspec vol 1c.5 - video engine command streamer:
+	 * "If ENABLED, all TLBs will be invalidated once the flush
+	 * operation is complete. This bit is only valid when the
+	 * Post-Sync Operation field is a value of 1h or 3h."
+	 */
 	if (invalidate & I915_GEM_GPU_DOMAINS)
-		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
+			MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
 	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
@@ -1402,8 +1464,9 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
 }
 
 static int
-gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
-			      u32 offset, u32 len)
+hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			      u32 offset, u32 len,
+			      unsigned flags)
 {
 	int ret;
 
@@ -1411,7 +1474,30 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
+			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+	/* bit0-7 is the length on GEN6+ */
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			      u32 offset, u32 len,
+			      unsigned flags)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START |
+			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
 	/* bit0-7 is the length on GEN6+ */
 	intel_ring_emit(ring, offset);
 	intel_ring_advance(ring);
@@ -1432,10 +1518,17 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
+	/*
+	 * Bspec vol 1c.3 - blitter engine command streamer:
+	 * "If ENABLED, all TLBs will be invalidated once the flush
+	 * operation is complete. This bit is only valid when the
+	 * Post-Sync Operation field is a value of 1h or 3h."
+	 */
 	if (invalidate & I915_GEM_DOMAIN_RENDER)
-		cmd |= MI_INVALIDATE_TLB;
+		cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
+			MI_FLUSH_DW_OP_STOREDW;
 	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
@@ -1490,7 +1583,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 	ring->write_tail = ring_write_tail;
-	if (INTEL_INFO(dev)->gen >= 6)
+	if (IS_HASWELL(dev))
+		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+	else if (INTEL_INFO(dev)->gen >= 6)
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 4)
 		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
@@ -1501,12 +1596,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	ring->init = init_render_ring;
 	ring->cleanup = render_ring_cleanup;
 
-
-	if (!I915_NEED_GFX_HWS(dev)) {
-		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
-		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
-	}
-
 	return intel_init_ring_buffer(dev, ring);
 }
 
@@ -1514,6 +1603,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	int ret;
 
 	ring->name = "render ring";
 	ring->id = RCS;
@@ -1551,16 +1641,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	ring->init = init_render_ring;
 	ring->cleanup = render_ring_cleanup;
 
-	if (!I915_NEED_GFX_HWS(dev))
-		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
-
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 
 	ring->size = size;
 	ring->effective_size = ring->size;
-	if (IS_I830(ring->dev))
+	if (IS_I830(ring->dev) || IS_845G(ring->dev))
 		ring->effective_size -= 128;
 
 	ring->virtual_start = ioremap_wc(start, size);
@@ -1570,6 +1657,12 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 		return -ENOMEM;
 	}
 
+	if (!I915_NEED_GFX_HWS(dev)) {
+		ret = init_phys_hws_pga(ring);
+		if (ret)
+			return ret;
+	}
+
 	return 0;
 }
 
@@ -1618,7 +1711,6 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 	}
 	ring->init = init_ring_common;
 
-
 	return intel_init_ring_buffer(dev, ring);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2ea7a311a1f0..526182ed0c6d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,6 +1,17 @@
 #ifndef _INTEL_RINGBUFFER_H_
 #define _INTEL_RINGBUFFER_H_
 
+/*
+ * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
+ * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
+ * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
+ *
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
+ * cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ */
+#define I915_RING_FREE_SPACE 64
+
 struct  intel_hw_status_page {
 	u32		*page_addr;
 	unsigned int	gfx_addr;
@@ -70,8 +81,7 @@ struct  intel_ring_buffer {
 	int __must_check (*flush)(struct intel_ring_buffer *ring,
 				  u32	invalidate_domains,
 				  u32	flush_domains);
-	int		(*add_request)(struct intel_ring_buffer *ring,
-				       u32 *seqno);
+	int		(*add_request)(struct intel_ring_buffer *ring);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
 	 * However, the up-to-date seqno is not always required and the last
@@ -81,7 +91,9 @@ struct  intel_ring_buffer {
 	u32		(*get_seqno)(struct intel_ring_buffer *ring,
 				     bool lazy_coherency);
 	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
-					       u32 offset, u32 length);
+					       u32 offset, u32 length,
+					       unsigned flags);
+#define I915_DISPATCH_SECURE 0x1
 	void		(*cleanup)(struct intel_ring_buffer *ring);
 	int		(*sync_to)(struct intel_ring_buffer *ring,
 				   struct intel_ring_buffer *to,
@@ -181,27 +193,21 @@ intel_read_status_page(struct intel_ring_buffer *ring,
  * The area from dword 0x20 to 0x3ff is available for driver usage.
  */
 #define I915_GEM_HWS_INDEX		0x20
+#define I915_GEM_HWS_SCRATCH_INDEX	0x30
+#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
 
-int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
-static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
-{
-	return intel_wait_ring_buffer(ring, ring->size - 8);
-}
-
 int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
-
 static inline void intel_ring_emit(struct intel_ring_buffer *ring,
 				   u32 data)
 {
 	iowrite32(data, ring->virtual_start + ring->tail);
 	ring->tail += 4;
 }
-
 void intel_ring_advance(struct intel_ring_buffer *ring);
+int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
 
-u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
 int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
 int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
 
@@ -217,6 +223,12 @@ static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
 	return ring->tail;
 }
 
+static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
+{
+	BUG_ON(ring->outstanding_lazy_request == 0);
+	return ring->outstanding_lazy_request;
+}
+
 static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
 {
 	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index a6ac0b416964..c275bf0fa36d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -509,7 +509,7 @@ out:
 static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
 				     void *response, int response_len)
 {
-	u8 retry = 5;
+	u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
 	u8 status;
 	int i;
 
@@ -522,14 +522,27 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
 	 * command to be complete.
 	 *
 	 * Check 5 times in case the hardware failed to read the docs.
+	 *
+	 * Also beware that the first response by many devices is to
+	 * reply PENDING and stall for time. TVs are notorious for
+	 * requiring longer than specified to complete their replies.
+	 * Originally (in the DDX long ago) the delay was only ever 15ms,
+	 * with an additional 30ms delay for TVs added later, after many
+	 * experiments. To accommodate both sets of delays, we fall back
+	 * to a sequence of slow checks if the device is falling behind
+	 * and fails to reply within 5*15µs.
 	 */
 	if (!intel_sdvo_read_byte(intel_sdvo,
 				  SDVO_I2C_CMD_STATUS,
 				  &status))
 		goto log_fail;
 
-	while (status == SDVO_CMD_STATUS_PENDING && retry--) {
-		udelay(15);
+	while (status == SDVO_CMD_STATUS_PENDING && --retry) {
+		if (retry < 10)
+			msleep(15);
+		else
+			udelay(15);
+
 		if (!intel_sdvo_read_byte(intel_sdvo,
 					  SDVO_I2C_CMD_STATUS,
 					  &status))
@@ -1228,6 +1241,30 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
 
 	temp = I915_READ(intel_sdvo->sdvo_reg);
 	if ((temp & SDVO_ENABLE) != 0) {
+		/* HW workaround for IBX: we need to move the port to
+		 * transcoder A before disabling it. */
+		if (HAS_PCH_IBX(encoder->base.dev)) {
+			struct drm_crtc *crtc = encoder->base.crtc;
+			int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+			if (temp & SDVO_PIPE_B_SELECT) {
+				temp &= ~SDVO_PIPE_B_SELECT;
+				I915_WRITE(intel_sdvo->sdvo_reg, temp);
+				POSTING_READ(intel_sdvo->sdvo_reg);
+
+				/* Again we need to write this twice. */
+				I915_WRITE(intel_sdvo->sdvo_reg, temp);
+				POSTING_READ(intel_sdvo->sdvo_reg);
+
+				/* Transcoder selection bits only update
+				 * effectively on vblank. */
+				if (crtc)
+					intel_wait_for_vblank(encoder->base.dev, pipe);
+				else
+					msleep(50);
+			}
+		}
+
 		intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
 	}
 }
@@ -1244,8 +1281,20 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
 	u8 status;
 
 	temp = I915_READ(intel_sdvo->sdvo_reg);
-	if ((temp & SDVO_ENABLE) == 0)
+	if ((temp & SDVO_ENABLE) == 0) {
+		/* HW workaround for IBX: the disable path moved the port to
+		 * transcoder A, so restore the original transcoder selection
+		 * before enabling it again. */
+		if (HAS_PCH_IBX(dev)) {
+			struct drm_crtc *crtc = encoder->base.crtc;
+			int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+			/* Restore the transcoder select bit. */
+			if (pipe == PIPE_B)
+				temp |= SDVO_PIPE_B_SELECT;
+		}
+
 		intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
+	}
 	for (i = 0; i < 2; i++)
 		intel_wait_for_vblank(dev, intel_crtc->pipe);
 
@@ -1499,15 +1548,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 	enum drm_connector_status ret;
 
-	if (!intel_sdvo_write_cmd(intel_sdvo,
-				  SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
-		return connector_status_unknown;
-
-	/* add 30ms delay when the output type might be TV */
-	if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
-		msleep(30);
-
-	if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
+	if (!intel_sdvo_get_value(intel_sdvo,
+				  SDVO_CMD_GET_ATTACHED_DISPLAYS,
+				  &response, 2))
 		return connector_status_unknown;
 
 	DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
@@ -1796,7 +1839,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
 	intel_sdvo_destroy_enhance_property(connector);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
-	kfree(connector);
+	kfree(intel_sdvo_connector);
 }
 
 static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
@@ -1828,7 +1871,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
 	uint8_t cmd;
 	int ret;
 
-	ret = drm_connector_property_set_value(connector, property, val);
+	ret = drm_object_property_set_value(&connector->base, property, val);
 	if (ret)
 		return ret;
 
@@ -1883,7 +1926,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
 	} else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
 		temp_value = val;
 		if (intel_sdvo_connector->left == property) {
-			drm_connector_property_set_value(connector,
+			drm_object_property_set_value(&connector->base,
 							 intel_sdvo_connector->right, val);
 			if (intel_sdvo_connector->left_margin == temp_value)
 				return 0;
@@ -1895,7 +1938,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
 			cmd = SDVO_CMD_SET_OVERSCAN_H;
 			goto set_value;
 		} else if (intel_sdvo_connector->right == property) {
-			drm_connector_property_set_value(connector,
+			drm_object_property_set_value(&connector->base,
 							 intel_sdvo_connector->left, val);
 			if (intel_sdvo_connector->right_margin == temp_value)
 				return 0;
@@ -1907,7 +1950,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
 			cmd = SDVO_CMD_SET_OVERSCAN_H;
 			goto set_value;
 		} else if (intel_sdvo_connector->top == property) {
-			drm_connector_property_set_value(connector,
+			drm_object_property_set_value(&connector->base,
 							 intel_sdvo_connector->bottom, val);
 			if (intel_sdvo_connector->top_margin == temp_value)
 				return 0;
@@ -1919,7 +1962,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
 			cmd = SDVO_CMD_SET_OVERSCAN_V;
 			goto set_value;
 		} else if (intel_sdvo_connector->bottom == property) {
-			drm_connector_property_set_value(connector,
+			drm_object_property_set_value(&connector->base,
 							 intel_sdvo_connector->top, val);
 			if (intel_sdvo_connector->bottom_margin == temp_value)
 				return 0;
@@ -2072,17 +2115,24 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
 	else
 		mapping = &dev_priv->sdvo_mappings[1];
 
-	pin = GMBUS_PORT_DPB;
-	if (mapping->initialized)
+	if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin))
 		pin = mapping->i2c_pin;
+	else
+		pin = GMBUS_PORT_DPB;
 
-	if (intel_gmbus_is_port_valid(pin)) {
-		sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
-		intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
-		intel_gmbus_force_bit(sdvo->i2c, true);
-	} else {
-		sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
-	}
+	sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
+
+	/* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
+	 * our code totally fails once we start using gmbus. Hence fall back to
+	 * bit banging for now. */
+	intel_gmbus_force_bit(sdvo->i2c, true);
+}
+
+/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */
+static void
+intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo)
+{
+	intel_gmbus_force_bit(sdvo->i2c, false);
 }
 
 static bool
@@ -2427,7 +2477,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
 				i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
 
 	intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
-	drm_connector_attach_property(&intel_sdvo_connector->base.base,
+	drm_object_attach_property(&intel_sdvo_connector->base.base.base,
 				      intel_sdvo_connector->tv_format, 0);
 	return true;
 
@@ -2443,7 +2493,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
 		intel_sdvo_connector->name = \
 			drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
 		if (!intel_sdvo_connector->name) return false; \
-		drm_connector_attach_property(connector, \
+		drm_object_attach_property(&connector->base, \
 					      intel_sdvo_connector->name, \
 					      intel_sdvo_connector->cur_##name); \
 		DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2480,7 +2530,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
 		if (!intel_sdvo_connector->left)
 			return false;
 
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      intel_sdvo_connector->left,
 					      intel_sdvo_connector->left_margin);
 
@@ -2489,7 +2539,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
 		if (!intel_sdvo_connector->right)
 			return false;
 
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      intel_sdvo_connector->right,
 					      intel_sdvo_connector->right_margin);
 		DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2517,7 +2567,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
 		if (!intel_sdvo_connector->top)
 			return false;
 
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      intel_sdvo_connector->top,
 					      intel_sdvo_connector->top_margin);
 
@@ -2527,7 +2577,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
 		if (!intel_sdvo_connector->bottom)
 			return false;
 
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      intel_sdvo_connector->bottom,
 					      intel_sdvo_connector->bottom_margin);
 		DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2559,7 +2609,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
 		if (!intel_sdvo_connector->dot_crawl)
 			return false;
 
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      intel_sdvo_connector->dot_crawl,
 					      intel_sdvo_connector->cur_dot_crawl);
 		DRM_DEBUG_KMS("dot crawl: current %d\n", response);
@@ -2663,10 +2713,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
 	intel_sdvo->is_sdvob = is_sdvob;
 	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
 	intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
-	if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
-		kfree(intel_sdvo);
-		return false;
-	}
+	if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
+		goto err_i2c_bus;
 
 	/* encoder type will be decided later */
 	intel_encoder = &intel_sdvo->base;
@@ -2765,6 +2813,8 @@ err_output:
 err:
 	drm_encoder_cleanup(&intel_encoder->base);
 	i2c_del_adapter(&intel_sdvo->ddc);
+err_i2c_bus:
+	intel_sdvo_unselect_i2c_bus(intel_sdvo);
 	kfree(intel_sdvo);
 
 	return false;
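
The retry scheme added to intel_sdvo_read_response() above bounds the worst-case wait; a back-of-the-envelope tally (approximate, since msleep() may overshoot):

	/*
	 * quick polls:  5 x udelay(15)  ~=     75 us
	 * slow polls: ~10 x msleep(15)  >= 135000 us
	 *
	 * so a device that keeps answering PENDING now gets well over
	 * 100 ms to finish, versus the flat 5 x 15 us of the old loop.
	 */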
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 82f5e5c7009d..827dcd4edf1c 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -48,7 +48,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	int pipe = intel_plane->pipe;
 	u32 sprctl, sprscale = 0;
-	int pixel_size;
+	unsigned long sprsurf_offset, linear_offset;
+	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
 
 	sprctl = I915_READ(SPRCTL(pipe));
 
@@ -61,33 +62,24 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
 	switch (fb->pixel_format) {
 	case DRM_FORMAT_XBGR8888:
 		sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
-		pixel_size = 4;
 		break;
 	case DRM_FORMAT_XRGB8888:
 		sprctl |= SPRITE_FORMAT_RGBX888;
-		pixel_size = 4;
 		break;
 	case DRM_FORMAT_YUYV:
 		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
-		pixel_size = 2;
 		break;
 	case DRM_FORMAT_YVYU:
 		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
-		pixel_size = 2;
 		break;
 	case DRM_FORMAT_UYVY:
 		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
-		pixel_size = 2;
 		break;
 	case DRM_FORMAT_VYUY:
 		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
-		pixel_size = 2;
 		break;
 	default:
-		DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
-		sprctl |= SPRITE_FORMAT_RGBX888;
-		pixel_size = 4;
-		break;
+		BUG();
 	}
 
 	if (obj->tiling_mode != I915_TILING_NONE)
@@ -127,18 +119,28 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
 
 	I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
 	I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
-	if (obj->tiling_mode != I915_TILING_NONE) {
-		I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
-	} else {
-		unsigned long offset;
 
-		offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
-		I915_WRITE(SPRLINOFF(pipe), offset);
-	}
+	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+	sprsurf_offset =
+		intel_gen4_compute_offset_xtiled(&x, &y,
+						 fb->bits_per_pixel / 8,
+						 fb->pitches[0]);
+	linear_offset -= sprsurf_offset;
+
+	/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
+	 * register */
+	if (IS_HASWELL(dev))
+		I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
+	else if (obj->tiling_mode != I915_TILING_NONE)
+		I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
+	else
+		I915_WRITE(SPRLINOFF(pipe), linear_offset);
+
 	I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
-	I915_WRITE(SPRSCALE(pipe), sprscale);
+	if (intel_plane->can_scale)
+		I915_WRITE(SPRSCALE(pipe), sprscale);
 	I915_WRITE(SPRCTL(pipe), sprctl);
-	I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset);
+	I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
 	POSTING_READ(SPRSURF(pipe));
 }
 
@@ -152,7 +154,8 @@ ivb_disable_plane(struct drm_plane *plane)
 
 	I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
 	/* Can't leave the scaler enabled... */
-	I915_WRITE(SPRSCALE(pipe), 0);
+	if (intel_plane->can_scale)
+		I915_WRITE(SPRSCALE(pipe), 0);
 	/* Activate double buffered register update */
 	I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
 	POSTING_READ(SPRSURF(pipe));
@@ -225,8 +228,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
 	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
-	int pipe = intel_plane->pipe, pixel_size;
+	int pipe = intel_plane->pipe;
+	unsigned long dvssurf_offset, linear_offset;
 	u32 dvscntr, dvsscale;
+	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
 
 	dvscntr = I915_READ(DVSCNTR(pipe));
 
@@ -239,33 +244,24 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
 	switch (fb->pixel_format) {
 	case DRM_FORMAT_XBGR8888:
 		dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
-		pixel_size = 4;
 		break;
 	case DRM_FORMAT_XRGB8888:
 		dvscntr |= DVS_FORMAT_RGBX888;
-		pixel_size = 4;
 		break;
 	case DRM_FORMAT_YUYV:
 		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
-		pixel_size = 2;
 		break;
 	case DRM_FORMAT_YVYU:
 		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
-		pixel_size = 2;
 		break;
 	case DRM_FORMAT_UYVY:
 		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
-		pixel_size = 2;
 		break;
 	case DRM_FORMAT_VYUY:
 		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
-		pixel_size = 2;
 		break;
 	default:
-		DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
-		dvscntr |= DVS_FORMAT_RGBX888;
-		pixel_size = 4;
-		break;
+		BUG();
 	}
 
 	if (obj->tiling_mode != I915_TILING_NONE)
@@ -289,18 +285,23 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
 
 	I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
 	I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
-	if (obj->tiling_mode != I915_TILING_NONE) {
-		I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
-	} else {
-		unsigned long offset;
 
-		offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
-		I915_WRITE(DVSLINOFF(pipe), offset);
-	}
+	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+	dvssurf_offset =
+		intel_gen4_compute_offset_xtiled(&x, &y,
+						 fb->bits_per_pixel / 8,
+						 fb->pitches[0]);
+	linear_offset -= dvssurf_offset;
+
+	if (obj->tiling_mode != I915_TILING_NONE)
+		I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
+	else
+		I915_WRITE(DVSLINOFF(pipe), linear_offset);
+
 	I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
 	I915_WRITE(DVSSCALE(pipe), dvsscale);
 	I915_WRITE(DVSCNTR(pipe), dvscntr);
-	I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset);
+	I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
 	POSTING_READ(DVSSURF(pipe));
 }
 
@@ -422,6 +423,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	struct intel_framebuffer *intel_fb;
 	struct drm_i915_gem_object *obj, *old_obj;
 	int pipe = intel_plane->pipe;
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
 	int ret = 0;
 	int x = src_x >> 16, y = src_y >> 16;
 	int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
@@ -436,7 +439,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	src_h = src_h >> 16;
 
 	/* Pipe must be running... */
-	if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
+	if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE))
 		return -EINVAL;
 
 	if (crtc_x >= primary_w || crtc_y >= primary_h)
@@ -446,6 +449,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	if (intel_plane->pipe != intel_crtc->pipe)
 		return -EINVAL;
 
+	/* Sprite planes can be linear or x-tiled surfaces */
+	switch (obj->tiling_mode) {
+		case I915_TILING_NONE:
+		case I915_TILING_X:
+			break;
+		default:
+			return -EINVAL;
+	}
+
 	/*
 	 * Clamp the width & height into the visible area.  Note we don't
 	 * try to scale the source if part of the visible region is offscreen.
@@ -472,6 +484,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	if (!crtc_w || !crtc_h) /* Again, nothing to display */
 		goto out;
 
+	/*
+	 * We may not have a scaler, e.g. HSW does not have it any more
+	 */
+	if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
+		return -EINVAL;
+
 	/*
 	 * We can take a larger source and scale it down, but
 	 * only so much...  16x is the max on SNB.
@@ -665,6 +683,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
 	switch (INTEL_INFO(dev)->gen) {
 	case 5:
 	case 6:
+		intel_plane->can_scale = true;
 		intel_plane->max_downscale = 16;
 		intel_plane->update_plane = ilk_update_plane;
 		intel_plane->disable_plane = ilk_disable_plane;
@@ -681,6 +700,10 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
 		break;
 
 	case 7:
+		if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
+			intel_plane->can_scale = false;
+		else
+			intel_plane->can_scale = true;
 		intel_plane->max_downscale = 2;
 		intel_plane->update_plane = ivb_update_plane;
 		intel_plane->disable_plane = ivb_disable_plane;
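
Both plane-update paths above now split the (x, y) origin into a byte offset: whole X-tiles are folded into the surface base the hardware scans out from (via intel_gen4_compute_offset_xtiled(), which also rewrites x and y), and only the remainder is programmed into SPRLINOFF/DVSLINOFF. A rough standalone model of that split, assuming the 8-row X-tile geometry; the real helper's exact rounding is not shown in this hunk:

    #include <stdio.h>

    /* Byte offset of pixel (x, y) in a linear surface. */
    static unsigned long linear_offset(int x, int y, int cpp, int pitch)
    {
        return (unsigned long)y * pitch + (unsigned long)x * cpp;
    }

    int main(void)
    {
        int cpp = 4;                 /* XRGB8888 */
        int pitch = 1920 * cpp;      /* bytes per scanline */
        unsigned long lin = linear_offset(100, 10, cpp, pitch);

        /* Crude model: fold whole tile rows (8 scanlines of X-tiles) into
         * the surface base; only the remainder goes in the LINOFF register. */
        unsigned long tile_row = 8UL * pitch;
        unsigned long surf = (lin / tile_row) * tile_row;

        printf("linear=%lu surf=%lu remainder=%lu\n", lin, surf, lin - surf);
        return 0;
    }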
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 62bb048c135e..ea93520c1278 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1088,13 +1088,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		int dspcntr_reg = DSPCNTR(intel_crtc->plane);
 		int pipeconf = I915_READ(pipeconf_reg);
 		int dspcntr = I915_READ(dspcntr_reg);
-		int dspbase_reg = DSPADDR(intel_crtc->plane);
 		int xpos = 0x0, ypos = 0x0;
 		unsigned int xsize, ysize;
 		/* Pipe must be off here */
 		I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
-		/* Flush the plane changes */
-		I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+		intel_flush_display_plane(dev_priv, intel_crtc->plane);
 
 		/* Wait for vblank for the disable to take effect */
 		if (IS_GEN2(dev))
@@ -1123,8 +1121,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 
 		I915_WRITE(pipeconf_reg, pipeconf);
 		I915_WRITE(dspcntr_reg, dspcntr);
-		/* Flush the plane changes */
-		I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+		intel_flush_display_plane(dev_priv, intel_crtc->plane);
 	}
 
 	j = 0;
@@ -1292,7 +1289,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
 	}
 
 	intel_tv->tv_format = tv_mode->name;
-	drm_connector_property_set_value(connector,
+	drm_object_property_set_value(&connector->base,
 		connector->dev->mode_config.tv_mode_property, i);
 }
 
@@ -1446,7 +1443,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
 	int ret = 0;
 	bool changed = false;
 
-	ret = drm_connector_property_set_value(connector, property, val);
+	ret = drm_object_property_set_value(&connector->base, property, val);
 	if (ret < 0)
 		goto out;
 
@@ -1658,18 +1655,18 @@ intel_tv_init(struct drm_device *dev)
 				      ARRAY_SIZE(tv_modes),
 				      tv_format_names);
 
-	drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
+	drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
 				   initial_mode);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				   dev->mode_config.tv_left_margin_property,
 				   intel_tv->margin[TV_MARGIN_LEFT]);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				   dev->mode_config.tv_top_margin_property,
 				   intel_tv->margin[TV_MARGIN_TOP]);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				   dev->mode_config.tv_right_margin_property,
 				   intel_tv->margin[TV_MARGIN_RIGHT]);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				   dev->mode_config.tv_bottom_margin_property,
 				   intel_tv->margin[TV_MARGIN_BOTTOM]);
 	drm_sysfs_connector_add(connector);
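
The drm_connector_attach_property()/drm_connector_property_set_value() calls in this file and in intel_sdvo.c above are converted mechanically: the property helpers were generalized to work on any drm_mode_object, so call sites now pass the connector's embedded base object. The call-site shape, with prop and init_val as placeholder names:

    /* before: connector-specific wrapper */
    drm_connector_attach_property(connector, prop, init_val);

    /* after: generic mode-object call on the embedded base object */
    drm_object_attach_property(&connector->base, prop, init_val);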
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index d6a1aae33701..70dd3c5529d4 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -133,6 +133,8 @@ static int mga_vram_init(struct mga_device *mdev)
 {
 	void __iomem *mem;
 	struct apertures_struct *aper = alloc_apertures(1);
+	if (!aper)
+		return -ENOMEM;
 
 	/* BAR 0 is VRAM */
 	mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
@@ -140,9 +142,9 @@ static int mga_vram_init(struct mga_device *mdev)
 
 	aper->ranges[0].base = mdev->mc.vram_base;
 	aper->ranges[0].size = mdev->mc.vram_window;
-	aper->count = 1;
 
 	remove_conflicting_framebuffers(aper, "mgafb", true);
+	kfree(aper);
 
 	if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window,
 				"mgadrmfb_vram")) {
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 1504699666c4..8fc9d9201945 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -186,11 +186,11 @@ static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_r
 
 static int mgag200_bo_move(struct ttm_buffer_object *bo,
 		       bool evict, bool interruptible,
-		       bool no_wait_reserve, bool no_wait_gpu,
+		       bool no_wait_gpu,
 		       struct ttm_mem_reg *new_mem)
 {
 	int r;
-	r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+	r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 	return r;
 }
 
@@ -355,7 +355,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
 
 	ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
 			  ttm_bo_type_device, &mgabo->placement,
-			  align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+			  align >> PAGE_SHIFT, false, NULL, acc_size,
 			  NULL, mgag200_bo_ttm_destroy);
 	if (ret)
 		return ret;
@@ -382,7 +382,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
 	mgag200_ttm_placement(bo, pl_flag);
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret)
 		return ret;
 
@@ -405,7 +405,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
 
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret)
 		return ret;
 
@@ -430,7 +430,7 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo)
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
 
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret) {
 		DRM_ERROR("pushing to VRAM failed\n");
 		return ret;
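
These mgag200 changes track a TTM interface change: the no_wait_reserve argument is dropped from ttm_bo_validate() and ttm_bo_move_memcpy(), and a matching argument disappears from ttm_bo_init(), so each caller loses one false/0. Judging by the parameter's name, waiting for the buffer reservation is no longer optional; this hunk only shows the caller side. Before/after at a typical call site:

    /* before: interruptible, no_wait_reserve, no_wait_gpu */
    ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);

    /* after: interruptible, no_wait_gpu (reserve wait no longer optional) */
    ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);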
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index a990df4d6c04..ab25752a0b1e 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -11,6 +11,7 @@ nouveau-y := core/core/client.o
 nouveau-y += core/core/engctx.o
 nouveau-y += core/core/engine.o
 nouveau-y += core/core/enum.o
+nouveau-y += core/core/falcon.o
 nouveau-y += core/core/gpuobj.o
 nouveau-y += core/core/handle.o
 nouveau-y += core/core/mm.o
@@ -29,6 +30,7 @@ nouveau-y += core/subdev/bios/base.o
 nouveau-y += core/subdev/bios/bit.o
 nouveau-y += core/subdev/bios/conn.o
 nouveau-y += core/subdev/bios/dcb.o
+nouveau-y += core/subdev/bios/disp.o
 nouveau-y += core/subdev/bios/dp.o
 nouveau-y += core/subdev/bios/extdev.o
 nouveau-y += core/subdev/bios/gpio.o
@@ -64,9 +66,19 @@ nouveau-y += core/subdev/devinit/nv50.o
 nouveau-y += core/subdev/fb/base.o
 nouveau-y += core/subdev/fb/nv04.o
 nouveau-y += core/subdev/fb/nv10.o
+nouveau-y += core/subdev/fb/nv1a.o
 nouveau-y += core/subdev/fb/nv20.o
+nouveau-y += core/subdev/fb/nv25.o
 nouveau-y += core/subdev/fb/nv30.o
+nouveau-y += core/subdev/fb/nv35.o
+nouveau-y += core/subdev/fb/nv36.o
 nouveau-y += core/subdev/fb/nv40.o
+nouveau-y += core/subdev/fb/nv41.o
+nouveau-y += core/subdev/fb/nv44.o
+nouveau-y += core/subdev/fb/nv46.o
+nouveau-y += core/subdev/fb/nv47.o
+nouveau-y += core/subdev/fb/nv49.o
+nouveau-y += core/subdev/fb/nv4e.o
 nouveau-y += core/subdev/fb/nv50.o
 nouveau-y += core/subdev/fb/nvc0.o
 nouveau-y += core/subdev/gpio/base.o
@@ -111,7 +123,10 @@ nouveau-y += core/engine/dmaobj/base.o
 nouveau-y += core/engine/dmaobj/nv04.o
 nouveau-y += core/engine/dmaobj/nv50.o
 nouveau-y += core/engine/dmaobj/nvc0.o
+nouveau-y += core/engine/dmaobj/nvd0.o
 nouveau-y += core/engine/bsp/nv84.o
+nouveau-y += core/engine/bsp/nvc0.o
+nouveau-y += core/engine/bsp/nve0.o
 nouveau-y += core/engine/copy/nva3.o
 nouveau-y += core/engine/copy/nvc0.o
 nouveau-y += core/engine/copy/nve0.o
@@ -119,7 +134,21 @@ nouveau-y += core/engine/crypt/nv84.o
 nouveau-y += core/engine/crypt/nv98.o
 nouveau-y += core/engine/disp/nv04.o
 nouveau-y += core/engine/disp/nv50.o
+nouveau-y += core/engine/disp/nv84.o
+nouveau-y += core/engine/disp/nv94.o
+nouveau-y += core/engine/disp/nva0.o
+nouveau-y += core/engine/disp/nva3.o
 nouveau-y += core/engine/disp/nvd0.o
+nouveau-y += core/engine/disp/nve0.o
+nouveau-y += core/engine/disp/dacnv50.o
+nouveau-y += core/engine/disp/hdanva3.o
+nouveau-y += core/engine/disp/hdanvd0.o
+nouveau-y += core/engine/disp/hdminv84.o
+nouveau-y += core/engine/disp/hdminva3.o
+nouveau-y += core/engine/disp/hdminvd0.o
+nouveau-y += core/engine/disp/sornv50.o
+nouveau-y += core/engine/disp/sornv94.o
+nouveau-y += core/engine/disp/sornvd0.o
 nouveau-y += core/engine/disp/vga.o
 nouveau-y += core/engine/fifo/base.o
 nouveau-y += core/engine/fifo/nv04.o
@@ -151,11 +180,14 @@ nouveau-y += core/engine/mpeg/nv40.o
 nouveau-y += core/engine/mpeg/nv50.o
 nouveau-y += core/engine/mpeg/nv84.o
 nouveau-y += core/engine/ppp/nv98.o
+nouveau-y += core/engine/ppp/nvc0.o
 nouveau-y += core/engine/software/nv04.o
 nouveau-y += core/engine/software/nv10.o
 nouveau-y += core/engine/software/nv50.o
 nouveau-y += core/engine/software/nvc0.o
 nouveau-y += core/engine/vp/nv84.o
+nouveau-y += core/engine/vp/nvc0.o
+nouveau-y += core/engine/vp/nve0.o
 
 # drm/core
 nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
@@ -166,7 +198,7 @@ nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
 
 # drm/kms
 nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
-nouveau-y += nouveau_connector.o nouveau_hdmi.o nouveau_dp.o
+nouveau-y += nouveau_connector.o nouveau_dp.o
 nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
 
 # drm/kms/nv04:nv50
@@ -175,9 +207,7 @@ nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
 nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
 
 # drm/kms/nv50-
-nouveau-y += nv50_display.o nvd0_display.o
-nouveau-y += nv50_crtc.o nv50_dac.o nv50_sor.o nv50_cursor.o
-nouveau-y += nv50_evo.o
+nouveau-y += nv50_display.o
 
 # drm/pm
 nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o
diff --git a/drivers/gpu/drm/nouveau/core/core/engctx.c b/drivers/gpu/drm/nouveau/core/core/engctx.c
index e41b10d5eb59..84c71fad2b6c 100644
--- a/drivers/gpu/drm/nouveau/core/core/engctx.c
+++ b/drivers/gpu/drm/nouveau/core/core/engctx.c
@@ -189,6 +189,21 @@ nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend)
 	return nouveau_gpuobj_fini(&engctx->base, suspend);
 }
 
+int
+_nouveau_engctx_ctor(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, void *data, u32 size,
+		     struct nouveau_object **pobject)
+{
+	struct nouveau_engctx *engctx;
+	int ret;
+
+	ret = nouveau_engctx_create(parent, engine, oclass, NULL, 256, 256,
+				    NVOBJ_FLAG_ZERO_ALLOC, &engctx);
+	*pobject = nv_object(engctx);
+	return ret;
+}
+
 void
 _nouveau_engctx_dtor(struct nouveau_object *object)
 {
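
This generic constructor lets engines that need nothing beyond a 256-byte, zero-filled context drop their hand-rolled ctors (see the BSP/copy/crypt hunks below). It also follows the core's convention of publishing *pobject before checking the return code:

    ret = nouveau_engctx_create(parent, engine, oclass, NULL, 256, 256,
                                NVOBJ_FLAG_ZERO_ALLOC, &engctx);
    *pobject = nv_object(engctx);   /* published even when ret != 0, so the
                                     * caller can unwind the partial object */
    return ret;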
diff --git a/drivers/gpu/drm/nouveau/core/core/falcon.c b/drivers/gpu/drm/nouveau/core/core/falcon.c
new file mode 100644
index 000000000000..6b0843c33877
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/falcon.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/falcon.h>
+
+#include <subdev/timer.h>
+
+u32
+_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr)
+{
+	struct nouveau_falcon *falcon = (void *)object;
+	return nv_rd32(falcon, falcon->addr + addr);
+}
+
+void
+_nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+	struct nouveau_falcon *falcon = (void *)object;
+	nv_wr32(falcon, falcon->addr + addr, data);
+}
+
+int
+_nouveau_falcon_init(struct nouveau_object *object)
+{
+	struct nouveau_device *device = nv_device(object);
+	struct nouveau_falcon *falcon = (void *)object;
+	const struct firmware *fw;
+	char name[32] = "internal";
+	int ret, i;
+	u32 caps;
+
+	/* enable engine, and determine its capabilities */
+	ret = nouveau_engine_init(&falcon->base);
+	if (ret)
+		return ret;
+
+	if (device->chipset <  0xa3 ||
+	    device->chipset == 0xaa || device->chipset == 0xac) {
+		falcon->version = 0;
+		falcon->secret  = (falcon->addr == 0x087000) ? 1 : 0;
+	} else {
+		caps = nv_ro32(falcon, 0x12c);
+		falcon->version = (caps & 0x0000000f);
+		falcon->secret  = (caps & 0x00000030) >> 4;
+	}
+
+	caps = nv_ro32(falcon, 0x108);
+	falcon->code.limit = (caps & 0x000001ff) << 8;
+	falcon->data.limit = (caps & 0x0003fe00) >> 1;
+
+	nv_debug(falcon, "falcon version: %d\n", falcon->version);
+	nv_debug(falcon, "secret level: %d\n", falcon->secret);
+	nv_debug(falcon, "code limit: %d\n", falcon->code.limit);
+	nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
+
+	/* wait for 'uc halted' to be signalled before continuing */
+	if (falcon->secret) {
+		nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+		nv_wo32(falcon, 0x004, 0x00000010);
+	}
+
+	/* disable all interrupts */
+	nv_wo32(falcon, 0x014, 0xffffffff);
+
+	/* no default ucode provided by the engine implementation, try and
+	 * locate a "self-bootstrapping" firmware image for the engine
+	 */
+	if (!falcon->code.data) {
+		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
+			 device->chipset, falcon->addr >> 12);
+
+		ret = request_firmware(&fw, name, &device->pdev->dev);
+		if (ret == 0) {
+			falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+			falcon->code.size = fw->size;
+			falcon->data.data = NULL;
+			falcon->data.size = 0;
+			release_firmware(fw);
+		}
+
+		falcon->external = true;
+	}
+
+	/* next step is to try and load "static code/data segment" firmware
+	 * images for the engine
+	 */
+	if (!falcon->code.data) {
+		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
+			 device->chipset, falcon->addr >> 12);
+
+		ret = request_firmware(&fw, name, &device->pdev->dev);
+		if (ret) {
+			nv_error(falcon, "unable to load firmware data\n");
+			return ret;
+		}
+
+		falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+		falcon->data.size = fw->size;
+		release_firmware(fw);
+		if (!falcon->data.data)
+			return -ENOMEM;
+
+		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
+			 device->chipset, falcon->addr >> 12);
+
+		ret = request_firmware(&fw, name, &device->pdev->dev);
+		if (ret) {
+			nv_error(falcon, "unable to load firmware code\n");
+			return ret;
+		}
+
+		falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+		falcon->code.size = fw->size;
+		release_firmware(fw);
+		if (!falcon->code.data)
+			return -ENOMEM;
+	}
+
+	nv_debug(falcon, "firmware: %s (%s)\n", name, falcon->data.data ?
+		 "static code/data segments" : "self-bootstrapping");
+
+	/* ensure any "self-bootstrapping" firmware image is in vram */
+	if (!falcon->data.data && !falcon->core) {
+		ret = nouveau_gpuobj_new(object->parent, NULL,
+					 falcon->code.size, 256, 0,
+					&falcon->core);
+		if (ret) {
+			nv_error(falcon, "core allocation failed, %d\n", ret);
+			return ret;
+		}
+
+		for (i = 0; i < falcon->code.size; i += 4)
+			nv_wo32(falcon->core, i, falcon->code.data[i / 4]);
+	}
+
+	/* upload firmware bootloader (or the full code segments) */
+	if (falcon->core) {
+		if (device->card_type < NV_C0)
+			nv_wo32(falcon, 0x618, 0x04000000);
+		else
+			nv_wo32(falcon, 0x618, 0x00000114);
+		nv_wo32(falcon, 0x11c, 0);
+		nv_wo32(falcon, 0x110, falcon->core->addr >> 8);
+		nv_wo32(falcon, 0x114, 0);
+		nv_wo32(falcon, 0x118, 0x00006610);
+	} else {
+		if (falcon->code.size > falcon->code.limit ||
+		    falcon->data.size > falcon->data.limit) {
+			nv_error(falcon, "ucode exceeds falcon limit(s)\n");
+			return -EINVAL;
+		}
+
+		if (falcon->version < 3) {
+			nv_wo32(falcon, 0xff8, 0x00100000);
+			for (i = 0; i < falcon->code.size / 4; i++)
+				nv_wo32(falcon, 0xff4, falcon->code.data[i]);
+		} else {
+			nv_wo32(falcon, 0x180, 0x01000000);
+			for (i = 0; i < falcon->code.size / 4; i++) {
+				if ((i & 0x3f) == 0)
+					nv_wo32(falcon, 0x188, i >> 6);
+				nv_wo32(falcon, 0x184, falcon->code.data[i]);
+			}
+		}
+	}
+
+	/* upload data segment (if necessary), zeroing the remainder */
+	if (falcon->version < 3) {
+		nv_wo32(falcon, 0xff8, 0x00000000);
+		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
+			nv_wo32(falcon, 0xff4, falcon->data.data[i]);
+		for (; i < falcon->data.limit; i += 4)
+			nv_wo32(falcon, 0xff4, 0x00000000);
+	} else {
+		nv_wo32(falcon, 0x1c0, 0x01000000);
+		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
+			nv_wo32(falcon, 0x1c4, falcon->data.data[i]);
+		for (; i < falcon->data.limit / 4; i++)
+			nv_wo32(falcon, 0x1c4, 0x00000000);
+	}
+
+	/* start it running */
+	nv_wo32(falcon, 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
+	nv_wo32(falcon, 0x104, 0x00000000); /* ENTRY */
+	nv_wo32(falcon, 0x100, 0x00000002); /* TRIGGER */
+	nv_wo32(falcon, 0x048, 0x00000003); /* FIFO | CHSW */
+	return 0;
+}
+
+int
+_nouveau_falcon_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nouveau_falcon *falcon = (void *)object;
+
+	if (!suspend) {
+		nouveau_gpuobj_ref(NULL, &falcon->core);
+		if (falcon->external) {
+			kfree(falcon->data.data);
+			kfree(falcon->code.data);
+			falcon->code.data = NULL;
+		}
+	}
+
+	nv_mo32(falcon, 0x048, 0x00000003, 0x00000000);
+	nv_wo32(falcon, 0x014, 0xffffffff);
+
+	return nouveau_engine_fini(&falcon->base, suspend);
+}
+
+int
+nouveau_falcon_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, u32 addr, bool enable,
+		       const char *iname, const char *fname,
+		       int length, void **pobject)
+{
+	struct nouveau_falcon *falcon;
+	int ret;
+
+	ret = nouveau_engine_create_(parent, engine, oclass, enable, iname,
+				     fname, length, pobject);
+	falcon = *pobject;
+	if (ret)
+		return ret;
+
+	falcon->addr = addr;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
index 70586fde69cf..560b2214cf1c 100644
--- a/drivers/gpu/drm/nouveau/core/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
@@ -183,7 +183,7 @@ _nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend)
 }
 
 u32
-_nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
+_nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr)
 {
 	struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
 	struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
@@ -193,7 +193,7 @@ _nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
 }
 
 void
-_nouveau_gpuobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+_nouveau_gpuobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
 	struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
 	struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index a6d3cd6490f7..0261a11b2ae0 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -234,15 +234,18 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
 int
 nouveau_mm_fini(struct nouveau_mm *mm)
 {
-	struct nouveau_mm_node *node, *heap =
-		list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry);
-	int nodes = 0;
+	if (nouveau_mm_initialised(mm)) {
+		struct nouveau_mm_node *node, *heap =
+			list_first_entry(&mm->nodes, typeof(*heap), nl_entry);
+		int nodes = 0;
 
-	list_for_each_entry(node, &mm->nodes, nl_entry) {
-		if (WARN_ON(nodes++ == mm->heap_nodes))
-			return -EBUSY;
+		list_for_each_entry(node, &mm->nodes, nl_entry) {
+			if (WARN_ON(nodes++ == mm->heap_nodes))
+				return -EBUSY;
+		}
+
+		kfree(heap);
 	}
 
-	kfree(heap);
 	return 0;
 }
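
nouveau_mm_fini() previously took list_first_entry() of mm->nodes unconditionally, which is undefined for an allocator that was never initialised; the new nouveau_mm_initialised() guard makes teardown a no-op in that case. The shape of the guard, as a runnable toy:

    #include <stdbool.h>

    struct toy_mm { bool initialised; };

    /* Teardown that tolerates a never-initialised allocator, mirroring
     * the new nouveau_mm_initialised() guard. */
    static int toy_mm_fini(struct toy_mm *mm)
    {
        if (!mm->initialised)   /* nothing was set up; nothing to free */
            return 0;
        /* walk and free nodes here */
        mm->initialised = false;
        return 0;
    }

    int main(void)
    {
        struct toy_mm never = { .initialised = false };
        return toy_mm_fini(&never);     /* safe; the old code would have
                                         * walked an uninitialised list */
    }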
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
index 66f7dfd907ee..1d9f614cb97d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
@@ -22,18 +22,13 @@
  * Authors: Ben Skeggs
  */
 
-#include <core/os.h>
-#include <core/class.h>
 #include <core/engctx.h>
+#include <core/class.h>
 
 #include <engine/bsp.h>
 
 struct nv84_bsp_priv {
-	struct nouveau_bsp base;
-};
-
-struct nv84_bsp_chan {
-	struct nouveau_bsp_chan base;
+	struct nouveau_engine base;
 };
 
 /*******************************************************************************
@@ -49,61 +44,16 @@ nv84_bsp_sclass[] = {
  * BSP context
  ******************************************************************************/
 
-static int
-nv84_bsp_context_ctor(struct nouveau_object *parent,
-		      struct nouveau_object *engine,
-		      struct nouveau_oclass *oclass, void *data, u32 size,
-		      struct nouveau_object **pobject)
-{
-	struct nv84_bsp_chan *priv;
-	int ret;
-
-	ret = nouveau_bsp_context_create(parent, engine, oclass, NULL,
-					 0, 0, 0, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static void
-nv84_bsp_context_dtor(struct nouveau_object *object)
-{
-	struct nv84_bsp_chan *priv = (void *)object;
-	nouveau_bsp_context_destroy(&priv->base);
-}
-
-static int
-nv84_bsp_context_init(struct nouveau_object *object)
-{
-	struct nv84_bsp_chan *priv = (void *)object;
-	int ret;
-
-	ret = nouveau_bsp_context_init(&priv->base);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int
-nv84_bsp_context_fini(struct nouveau_object *object, bool suspend)
-{
-	struct nv84_bsp_chan *priv = (void *)object;
-	return nouveau_bsp_context_fini(&priv->base, suspend);
-}
-
 static struct nouveau_oclass
 nv84_bsp_cclass = {
 	.handle = NV_ENGCTX(BSP, 0x84),
 	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv84_bsp_context_ctor,
-		.dtor = nv84_bsp_context_dtor,
-		.init = nv84_bsp_context_init,
-		.fini = nv84_bsp_context_fini,
-		.rd32 = _nouveau_bsp_context_rd32,
-		.wr32 = _nouveau_bsp_context_wr32,
+		.ctor = _nouveau_engctx_ctor,
+		.dtor = _nouveau_engctx_dtor,
+		.init = _nouveau_engctx_init,
+		.fini = _nouveau_engctx_fini,
+		.rd32 = _nouveau_engctx_rd32,
+		.wr32 = _nouveau_engctx_wr32,
 	},
 };
 
@@ -111,11 +61,6 @@ nv84_bsp_cclass = {
  * BSP engine/subdev functions
  ******************************************************************************/
 
-static void
-nv84_bsp_intr(struct nouveau_subdev *subdev)
-{
-}
-
 static int
 nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	      struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +69,25 @@ nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv84_bsp_priv *priv;
 	int ret;
 
-	ret = nouveau_bsp_create(parent, engine, oclass, &priv);
+	ret = nouveau_engine_create(parent, engine, oclass, true,
+				    "PBSP", "bsp", &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
 	nv_subdev(priv)->unit = 0x04008000;
-	nv_subdev(priv)->intr = nv84_bsp_intr;
 	nv_engine(priv)->cclass = &nv84_bsp_cclass;
 	nv_engine(priv)->sclass = nv84_bsp_sclass;
 	return 0;
 }
 
-static void
-nv84_bsp_dtor(struct nouveau_object *object)
-{
-	struct nv84_bsp_priv *priv = (void *)object;
-	nouveau_bsp_destroy(&priv->base);
-}
-
-static int
-nv84_bsp_init(struct nouveau_object *object)
-{
-	struct nv84_bsp_priv *priv = (void *)object;
-	int ret;
-
-	ret = nouveau_bsp_init(&priv->base);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int
-nv84_bsp_fini(struct nouveau_object *object, bool suspend)
-{
-	struct nv84_bsp_priv *priv = (void *)object;
-	return nouveau_bsp_fini(&priv->base, suspend);
-}
-
 struct nouveau_oclass
 nv84_bsp_oclass = {
 	.handle = NV_ENGINE(BSP, 0x84),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv84_bsp_ctor,
-		.dtor = nv84_bsp_dtor,
-		.init = nv84_bsp_init,
-		.fini = nv84_bsp_fini,
+		.dtor = _nouveau_engine_dtor,
+		.init = _nouveau_engine_init,
+		.fini = _nouveau_engine_fini,
 	},
 };
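
The nv84 BSP conversion is representative of the rest of this series: the ctor/dtor/init/fini wrappers that merely forwarded to the base class are deleted, and since struct nv84_bsp_priv now embeds struct nouveau_engine as its first member, the stock _nouveau_engine_*/_nouveau_engctx_* helpers slot straight into the oclass tables. A runnable toy of that embedding-based inheritance:

    #include <stdio.h>

    /* Base/derived via struct embedding: the derived type is layout-
     * compatible with the base as long as the base comes first. */
    struct engine   { const char *name; };
    struct bsp_priv { struct engine base; };

    static void engine_init(struct engine *e) { printf("init %s\n", e->name); }

    int main(void)
    {
        struct bsp_priv priv = { .base = { .name = "PBSP" } };
        engine_init(&priv.base);    /* stock helper works unchanged */
        return 0;
    }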
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
new file mode 100644
index 000000000000..0a5aa6bb0870
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/bsp.h>
+
+struct nvc0_bsp_priv {
+	struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_bsp_sclass[] = {
+	{ 0x90b1, &nouveau_object_ofuncs },
+	{},
+};
+
+/*******************************************************************************
+ * PBSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_bsp_cclass = {
+	.handle = NV_ENGCTX(BSP, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_falcon_context_ctor,
+		.dtor = _nouveau_falcon_context_dtor,
+		.init = _nouveau_falcon_context_init,
+		.fini = _nouveau_falcon_context_fini,
+		.rd32 = _nouveau_falcon_context_rd32,
+		.wr32 = _nouveau_falcon_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PBSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_bsp_init(struct nouveau_object *object)
+{
+	struct nvc0_bsp_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_falcon_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x084010, 0x0000fff2);
+	nv_wr32(priv, 0x08401c, 0x0000fff2);
+	return 0;
+}
+
+static int
+nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nvc0_bsp_priv *priv;
+	int ret;
+
+	ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
+				    "PBSP", "bsp", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00008000;
+	nv_engine(priv)->cclass = &nvc0_bsp_cclass;
+	nv_engine(priv)->sclass = nvc0_bsp_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_bsp_oclass = {
+	.handle = NV_ENGINE(BSP, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_bsp_ctor,
+		.dtor = _nouveau_falcon_dtor,
+		.init = nvc0_bsp_init,
+		.fini = _nouveau_falcon_fini,
+		.rd32 = _nouveau_falcon_rd32,
+		.wr32 = _nouveau_falcon_wr32,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
new file mode 100644
index 000000000000..d4f23bbd75b4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/falcon.h>
+
+#include <engine/bsp.h>
+
+struct nve0_bsp_priv {
+	struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_bsp_sclass[] = {
+	{ 0x95b1, &nouveau_object_ofuncs },
+	{},
+};
+
+/*******************************************************************************
+ * PBSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_bsp_cclass = {
+	.handle = NV_ENGCTX(BSP, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_falcon_context_ctor,
+		.dtor = _nouveau_falcon_context_dtor,
+		.init = _nouveau_falcon_context_init,
+		.fini = _nouveau_falcon_context_fini,
+		.rd32 = _nouveau_falcon_context_rd32,
+		.wr32 = _nouveau_falcon_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PBSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_bsp_init(struct nouveau_object *object)
+{
+	struct nve0_bsp_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_falcon_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x084010, 0x0000fff2);
+	nv_wr32(priv, 0x08401c, 0x0000fff2);
+	return 0;
+}
+
+static int
+nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nve0_bsp_priv *priv;
+	int ret;
+
+	ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
+				    "PBSP", "bsp", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00008000;
+	nv_engine(priv)->cclass = &nve0_bsp_cclass;
+	nv_engine(priv)->sclass = nve0_bsp_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_bsp_oclass = {
+	.handle = NV_ENGINE(BSP, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_bsp_ctor,
+		.dtor = _nouveau_falcon_dtor,
+		.init = nve0_bsp_init,
+		.fini = _nouveau_falcon_fini,
+		.rd32 = _nouveau_falcon_rd32,
+		.wr32 = _nouveau_falcon_wr32,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
index 4df6da0af740..283248c7b050 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -22,10 +22,9 @@
  * Authors: Ben Skeggs
  */
 
-#include <core/os.h>
-#include <core/enum.h>
+#include <core/falcon.h>
 #include <core/class.h>
-#include <core/engctx.h>
+#include <core/enum.h>
 
 #include <subdev/fb.h>
 #include <subdev/vm.h>
@@ -36,11 +35,7 @@
 #include "fuc/nva3.fuc.h"
 
 struct nva3_copy_priv {
-	struct nouveau_copy base;
-};
-
-struct nva3_copy_chan {
-	struct nouveau_copy_chan base;
+	struct nouveau_falcon base;
 };
 
 /*******************************************************************************
@@ -57,34 +52,16 @@ nva3_copy_sclass[] = {
  * PCOPY context
  ******************************************************************************/
 
-static int
-nva3_copy_context_ctor(struct nouveau_object *parent,
-		       struct nouveau_object *engine,
-		       struct nouveau_oclass *oclass, void *data, u32 size,
-		       struct nouveau_object **pobject)
-{
-	struct nva3_copy_chan *priv;
-	int ret;
-
-	ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, 0,
-					  NVOBJ_FLAG_ZERO_ALLOC, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 static struct nouveau_oclass
 nva3_copy_cclass = {
 	.handle = NV_ENGCTX(COPY0, 0xa3),
 	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nva3_copy_context_ctor,
-		.dtor = _nouveau_copy_context_dtor,
-		.init = _nouveau_copy_context_init,
-		.fini = _nouveau_copy_context_fini,
-		.rd32 = _nouveau_copy_context_rd32,
-		.wr32 = _nouveau_copy_context_wr32,
+		.ctor = _nouveau_falcon_context_ctor,
+		.dtor = _nouveau_falcon_context_dtor,
+		.init = _nouveau_falcon_context_init,
+		.fini = _nouveau_falcon_context_fini,
+		.rd32 = _nouveau_falcon_context_rd32,
+		.wr32 = _nouveau_falcon_context_wr32,
 
 	},
 };
@@ -100,41 +77,40 @@ static const struct nouveau_enum nva3_copy_isr_error_name[] = {
 	{}
 };
 
-static void
+void
 nva3_copy_intr(struct nouveau_subdev *subdev)
 {
 	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
 	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_falcon *falcon = (void *)subdev;
 	struct nouveau_object *engctx;
-	struct nva3_copy_priv *priv = (void *)subdev;
-	u32 dispatch = nv_rd32(priv, 0x10401c);
-	u32 stat = nv_rd32(priv, 0x104008) & dispatch & ~(dispatch >> 16);
-	u64 inst = nv_rd32(priv, 0x104050) & 0x3fffffff;
-	u32 ssta = nv_rd32(priv, 0x104040) & 0x0000ffff;
-	u32 addr = nv_rd32(priv, 0x104040) >> 16;
+	u32 dispatch = nv_ro32(falcon, 0x01c);
+	u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
+	u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff;
+	u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff;
+	u32 addr = nv_ro32(falcon, 0x040) >> 16;
 	u32 mthd = (addr & 0x07ff) << 2;
 	u32 subc = (addr & 0x3800) >> 11;
-	u32 data = nv_rd32(priv, 0x104044);
+	u32 data = nv_ro32(falcon, 0x044);
 	int chid;
 
 	engctx = nouveau_engctx_get(engine, inst);
 	chid   = pfifo->chid(pfifo, engctx);
 
 	if (stat & 0x00000040) {
-		nv_error(priv, "DISPATCH_ERROR [");
+		nv_error(falcon, "DISPATCH_ERROR [");
 		nouveau_enum_print(nva3_copy_isr_error_name, ssta);
 		printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
 		       chid, inst << 12, subc, mthd, data);
-		nv_wr32(priv, 0x104004, 0x00000040);
+		nv_wo32(falcon, 0x004, 0x00000040);
 		stat &= ~0x00000040;
 	}
 
 	if (stat) {
-		nv_error(priv, "unhandled intr 0x%08x\n", stat);
-		nv_wr32(priv, 0x104004, stat);
+		nv_error(falcon, "unhandled intr 0x%08x\n", stat);
+		nv_wo32(falcon, 0x004, stat);
 	}
 
-	nv50_fb_trap(nouveau_fb(priv), 1);
 	nouveau_engctx_put(engctx);
 }
 
@@ -154,7 +130,8 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nva3_copy_priv *priv;
 	int ret;
 
-	ret = nouveau_copy_create(parent, engine, oclass, enable, 0, &priv);
+	ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, enable,
+				    "PCE0", "copy0", &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
@@ -164,59 +141,22 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	nv_engine(priv)->cclass = &nva3_copy_cclass;
 	nv_engine(priv)->sclass = nva3_copy_sclass;
 	nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush;
+	nv_falcon(priv)->code.data = nva3_pcopy_code;
+	nv_falcon(priv)->code.size = sizeof(nva3_pcopy_code);
+	nv_falcon(priv)->data.data = nva3_pcopy_data;
+	nv_falcon(priv)->data.size = sizeof(nva3_pcopy_data);
 	return 0;
 }
 
-static int
-nva3_copy_init(struct nouveau_object *object)
-{
-	struct nva3_copy_priv *priv = (void *)object;
-	int ret, i;
-
-	ret = nouveau_copy_init(&priv->base);
-	if (ret)
-		return ret;
-
-	/* disable all interrupts */
-	nv_wr32(priv, 0x104014, 0xffffffff);
-
-	/* upload ucode */
-	nv_wr32(priv, 0x1041c0, 0x01000000);
-	for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
-		nv_wr32(priv, 0x1041c4, nva3_pcopy_data[i]);
-
-	nv_wr32(priv, 0x104180, 0x01000000);
-	for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
-		if ((i & 0x3f) == 0)
-			nv_wr32(priv, 0x104188, i >> 6);
-		nv_wr32(priv, 0x104184, nva3_pcopy_code[i]);
-	}
-
-	/* start it running */
-	nv_wr32(priv, 0x10410c, 0x00000000);
-	nv_wr32(priv, 0x104104, 0x00000000); /* ENTRY */
-	nv_wr32(priv, 0x104100, 0x00000002); /* TRIGGER */
-	return 0;
-}
-
-static int
-nva3_copy_fini(struct nouveau_object *object, bool suspend)
-{
-	struct nva3_copy_priv *priv = (void *)object;
-
-	nv_mask(priv, 0x104048, 0x00000003, 0x00000000);
-	nv_wr32(priv, 0x104014, 0xffffffff);
-
-	return nouveau_copy_fini(&priv->base, suspend);
-}
-
 struct nouveau_oclass
 nva3_copy_oclass = {
 	.handle = NV_ENGINE(COPY0, 0xa3),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nva3_copy_ctor,
-		.dtor = _nouveau_copy_dtor,
-		.init = nva3_copy_init,
-		.fini = nva3_copy_fini,
+		.dtor = _nouveau_falcon_dtor,
+		.init = _nouveau_falcon_init,
+		.fini = _nouveau_falcon_fini,
+		.rd32 = _nouveau_falcon_rd32,
+		.wr32 = _nouveau_falcon_wr32,
 	},
 };
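
The nva3 PCOPY conversion moves the ucode upload and interrupt masking into the shared falcon init/fini, and the interrupt handler switches from absolute register numbers to base-relative nv_ro32()/nv_wo32() accessors, which add falcon->addr (0x104000 for PCE0) internally. A quick check of the translation:

    #include <assert.h>

    int main(void)
    {
        unsigned long falcon_base = 0x104000;   /* PCE0 */
        /* the old absolute register 0x10401c is now base + 0x01c */
        assert(falcon_base + 0x01c == 0x10401c);
        return 0;
    }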
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
index 06d4a8791055..b3ed2737e21f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -22,10 +22,9 @@
  * Authors: Ben Skeggs
  */
 
-#include <core/os.h>
-#include <core/enum.h>
+#include <core/falcon.h>
 #include <core/class.h>
-#include <core/engctx.h>
+#include <core/enum.h>
 
 #include <engine/fifo.h>
 #include <engine/copy.h>
@@ -33,11 +32,7 @@
 #include "fuc/nvc0.fuc.h"
 
 struct nvc0_copy_priv {
-	struct nouveau_copy base;
-};
-
-struct nvc0_copy_chan {
-	struct nouveau_copy_chan base;
+	struct nouveau_falcon base;
 };
 
 /*******************************************************************************
@@ -60,32 +55,14 @@ nvc0_copy1_sclass[] = {
  * PCOPY context
  ******************************************************************************/
 
-static int
-nvc0_copy_context_ctor(struct nouveau_object *parent,
-		       struct nouveau_object *engine,
-		       struct nouveau_oclass *oclass, void *data, u32 size,
-		       struct nouveau_object **pobject)
-{
-	struct nvc0_copy_chan *priv;
-	int ret;
-
-	ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
-					  256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 static struct nouveau_ofuncs
 nvc0_copy_context_ofuncs = {
-	.ctor = nvc0_copy_context_ctor,
-	.dtor = _nouveau_copy_context_dtor,
-	.init = _nouveau_copy_context_init,
-	.fini = _nouveau_copy_context_fini,
-	.rd32 = _nouveau_copy_context_rd32,
-	.wr32 = _nouveau_copy_context_wr32,
+	.ctor = _nouveau_falcon_context_ctor,
+	.dtor = _nouveau_falcon_context_dtor,
+	.init = _nouveau_falcon_context_init,
+	.fini = _nouveau_falcon_context_fini,
+	.rd32 = _nouveau_falcon_context_rd32,
+	.wr32 = _nouveau_falcon_context_wr32,
 };
 
 static struct nouveau_oclass
@@ -104,50 +81,18 @@ nvc0_copy1_cclass = {
  * PCOPY engine/subdev functions
  ******************************************************************************/
 
-static const struct nouveau_enum nvc0_copy_isr_error_name[] = {
-	{ 0x0001, "ILLEGAL_MTHD" },
-	{ 0x0002, "INVALID_ENUM" },
-	{ 0x0003, "INVALID_BITFIELD" },
-	{}
-};
-
-static void
-nvc0_copy_intr(struct nouveau_subdev *subdev)
+static int
+nvc0_copy_init(struct nouveau_object *object)
 {
-	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
-	struct nouveau_engine *engine = nv_engine(subdev);
-	struct nouveau_object *engctx;
-	int idx = nv_engidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
-	struct nvc0_copy_priv *priv = (void *)subdev;
-	u32 disp = nv_rd32(priv, 0x10401c + (idx * 0x1000));
-	u32 intr = nv_rd32(priv, 0x104008 + (idx * 0x1000));
-	u32 stat = intr & disp & ~(disp >> 16);
-	u64 inst = nv_rd32(priv, 0x104050 + (idx * 0x1000)) & 0x0fffffff;
-	u32 ssta = nv_rd32(priv, 0x104040 + (idx * 0x1000)) & 0x0000ffff;
-	u32 addr = nv_rd32(priv, 0x104040 + (idx * 0x1000)) >> 16;
-	u32 mthd = (addr & 0x07ff) << 2;
-	u32 subc = (addr & 0x3800) >> 11;
-	u32 data = nv_rd32(priv, 0x104044 + (idx * 0x1000));
-	int chid;
+	struct nvc0_copy_priv *priv = (void *)object;
+	int ret;
 
-	engctx = nouveau_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
+	ret = nouveau_falcon_init(&priv->base);
+	if (ret)
+		return ret;
 
-	if (stat & 0x00000040) {
-		nv_error(priv, "DISPATCH_ERROR [");
-		nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
-		printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
-		       chid, (u64)inst << 12, subc, mthd, data);
-		nv_wr32(priv, 0x104004 + (idx * 0x1000), 0x00000040);
-		stat &= ~0x00000040;
-	}
-
-	if (stat) {
-		nv_error(priv, "unhandled intr 0x%08x\n", stat);
-		nv_wr32(priv, 0x104004 + (idx * 0x1000), stat);
-	}
-
-	nouveau_engctx_put(engctx);
+	nv_wo32(priv, 0x084, nv_engidx(object) - NVDEV_ENGINE_COPY0);
+	return 0;
 }
 
 static int
@@ -161,15 +106,20 @@ nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (nv_rd32(parent, 0x022500) & 0x00000100)
 		return -ENODEV;
 
-	ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+	ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true,
+				    "PCE0", "copy0", &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
 	nv_subdev(priv)->unit = 0x00000040;
-	nv_subdev(priv)->intr = nvc0_copy_intr;
+	nv_subdev(priv)->intr = nva3_copy_intr;
 	nv_engine(priv)->cclass = &nvc0_copy0_cclass;
 	nv_engine(priv)->sclass = nvc0_copy0_sclass;
+	nv_falcon(priv)->code.data = nvc0_pcopy_code;
+	nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
+	nv_falcon(priv)->data.data = nvc0_pcopy_data;
+	nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
 	return 0;
 }
 
@@ -184,72 +134,33 @@ nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (nv_rd32(parent, 0x022500) & 0x00000200)
 		return -ENODEV;
 
-	ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+	ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true,
+				    "PCE1", "copy1", &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
 	nv_subdev(priv)->unit = 0x00000080;
-	nv_subdev(priv)->intr = nvc0_copy_intr;
+	nv_subdev(priv)->intr = nva3_copy_intr;
 	nv_engine(priv)->cclass = &nvc0_copy1_cclass;
 	nv_engine(priv)->sclass = nvc0_copy1_sclass;
+	nv_falcon(priv)->code.data = nvc0_pcopy_code;
+	nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
+	nv_falcon(priv)->data.data = nvc0_pcopy_data;
+	nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
 	return 0;
 }
 
-static int
-nvc0_copy_init(struct nouveau_object *object)
-{
-	int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
-	struct nvc0_copy_priv *priv = (void *)object;
-	int ret, i;
-
-	ret = nouveau_copy_init(&priv->base);
-	if (ret)
-		return ret;
-
-	/* disable all interrupts */
-	nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
-
-	/* upload ucode */
-	nv_wr32(priv, 0x1041c0 + (idx * 0x1000), 0x01000000);
-	for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
-		nv_wr32(priv, 0x1041c4 + (idx * 0x1000), nvc0_pcopy_data[i]);
-
-	nv_wr32(priv, 0x104180 + (idx * 0x1000), 0x01000000);
-	for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
-		if ((i & 0x3f) == 0)
-			nv_wr32(priv, 0x104188 + (idx * 0x1000), i >> 6);
-		nv_wr32(priv, 0x104184 + (idx * 0x1000), nvc0_pcopy_code[i]);
-	}
-
-	/* start it running */
-	nv_wr32(priv, 0x104084 + (idx * 0x1000), idx);
-	nv_wr32(priv, 0x10410c + (idx * 0x1000), 0x00000000);
-	nv_wr32(priv, 0x104104 + (idx * 0x1000), 0x00000000); /* ENTRY */
-	nv_wr32(priv, 0x104100 + (idx * 0x1000), 0x00000002); /* TRIGGER */
-	return 0;
-}
-
-static int
-nvc0_copy_fini(struct nouveau_object *object, bool suspend)
-{
-	int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
-	struct nvc0_copy_priv *priv = (void *)object;
-
-	nv_mask(priv, 0x104048 + (idx * 0x1000), 0x00000003, 0x00000000);
-	nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
-
-	return nouveau_copy_fini(&priv->base, suspend);
-}
-
 struct nouveau_oclass
 nvc0_copy0_oclass = {
 	.handle = NV_ENGINE(COPY0, 0xc0),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nvc0_copy0_ctor,
-		.dtor = _nouveau_copy_dtor,
+		.dtor = _nouveau_falcon_dtor,
 		.init = nvc0_copy_init,
-		.fini = nvc0_copy_fini,
+		.fini = _nouveau_falcon_fini,
+		.rd32 = _nouveau_falcon_rd32,
+		.wr32 = _nouveau_falcon_wr32,
 	},
 };
 
@@ -258,8 +169,10 @@ nvc0_copy1_oclass = {
 	.handle = NV_ENGINE(COPY1, 0xc0),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nvc0_copy1_ctor,
-		.dtor = _nouveau_copy_dtor,
+		.dtor = _nouveau_falcon_dtor,
 		.init = nvc0_copy_init,
-		.fini = nvc0_copy_fini,
+		.fini = _nouveau_falcon_fini,
+		.rd32 = _nouveau_falcon_rd32,
+		.wr32 = _nouveau_falcon_wr32,
 	},
 };
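
With the falcon base doing the heavy lifting, nvc0's chip-specific init reduces to telling each falcon which copy engine it is, and both Fermi PCEs reuse nva3's interrupt handler, which the nva3 hunk un-statics for the purpose (its prototype presumably moves to a shared header not shown here). The two lines that remain chip-specific, excerpted:

    /* per-PCE identity: 0 for COPY0 at 0x104000, 1 for COPY1 at 0x105000 */
    nv_wo32(priv, 0x084, nv_engidx(object) - NVDEV_ENGINE_COPY0);

    /* both constructors now share the nva3 interrupt handler */
    nv_subdev(priv)->intr = nva3_copy_intr;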
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
index 2017c1579ac5..dbbe9e8998fe 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -30,11 +30,7 @@
 #include <engine/copy.h>
 
 struct nve0_copy_priv {
-	struct nouveau_copy base;
-};
-
-struct nve0_copy_chan {
-	struct nouveau_copy_chan base;
+	struct nouveau_engine base;
 };
 
 /*******************************************************************************
@@ -51,32 +47,14 @@ nve0_copy_sclass[] = {
  * PCOPY context
  ******************************************************************************/
 
-static int
-nve0_copy_context_ctor(struct nouveau_object *parent,
-		       struct nouveau_object *engine,
-		       struct nouveau_oclass *oclass, void *data, u32 size,
-		       struct nouveau_object **pobject)
-{
-	struct nve0_copy_chan *priv;
-	int ret;
-
-	ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
-					  256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 static struct nouveau_ofuncs
 nve0_copy_context_ofuncs = {
-	.ctor = nve0_copy_context_ctor,
-	.dtor = _nouveau_copy_context_dtor,
-	.init = _nouveau_copy_context_init,
-	.fini = _nouveau_copy_context_fini,
-	.rd32 = _nouveau_copy_context_rd32,
-	.wr32 = _nouveau_copy_context_wr32,
+	.ctor = _nouveau_engctx_ctor,
+	.dtor = _nouveau_engctx_dtor,
+	.init = _nouveau_engctx_init,
+	.fini = _nouveau_engctx_fini,
+	.rd32 = _nouveau_engctx_rd32,
+	.wr32 = _nouveau_engctx_wr32,
 };
 
 static struct nouveau_oclass
@@ -100,7 +78,8 @@ nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (nv_rd32(parent, 0x022500) & 0x00000100)
 		return -ENODEV;
 
-	ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+	ret = nouveau_engine_create(parent, engine, oclass, true,
+				    "PCE0", "copy0", &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
@@ -122,7 +101,8 @@ nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (nv_rd32(parent, 0x022500) & 0x00000200)
 		return -ENODEV;
 
-	ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+	ret = nouveau_engine_create(parent, engine, oclass, true,
+				    "PCE1", "copy1", &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
@@ -138,9 +118,9 @@ nve0_copy0_oclass = {
 	.handle = NV_ENGINE(COPY0, 0xe0),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nve0_copy0_ctor,
-		.dtor = _nouveau_copy_dtor,
-		.init = _nouveau_copy_init,
-		.fini = _nouveau_copy_fini,
+		.dtor = _nouveau_engine_dtor,
+		.init = _nouveau_engine_init,
+		.fini = _nouveau_engine_fini,
 	},
 };
 
@@ -149,8 +129,8 @@ nve0_copy1_oclass = {
 	.handle = NV_ENGINE(COPY1, 0xe0),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nve0_copy1_ctor,
-		.dtor = _nouveau_copy_dtor,
-		.init = _nouveau_copy_init,
-		.fini = _nouveau_copy_fini,
+		.dtor = _nouveau_engine_dtor,
+		.init = _nouveau_engine_init,
+		.fini = _nouveau_engine_fini,
 	},
 };
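
Note the asymmetry between these Kepler hunks and the Fermi ones above: nvc0_copy_priv embeds nouveau_falcon and supplies ucode, while nve0_copy_priv embeds plain nouveau_engine with no ucode at all, presumably because the GK104 copy engines are no longer falcon-based and need nothing uploaded. Side by side:

    struct nvc0_copy_priv { struct nouveau_falcon base; }; /* Fermi: falcon + ucode */
    struct nve0_copy_priv { struct nouveau_engine base; }; /* Kepler: plain engine */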
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
index 1d85e5b66ca0..b97490512723 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -34,11 +34,7 @@
 #include <engine/crypt.h>
 
 struct nv84_crypt_priv {
-	struct nouveau_crypt base;
-};
-
-struct nv84_crypt_chan {
-	struct nouveau_crypt_chan base;
+	struct nouveau_engine base;
 };
 
 /*******************************************************************************
@@ -87,34 +83,16 @@ nv84_crypt_sclass[] = {
  * PCRYPT context
  ******************************************************************************/
 
-static int
-nv84_crypt_context_ctor(struct nouveau_object *parent,
-			struct nouveau_object *engine,
-			struct nouveau_oclass *oclass, void *data, u32 size,
-			struct nouveau_object **pobject)
-{
-	struct nv84_crypt_chan *priv;
-	int ret;
-
-	ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
-					   0, NVOBJ_FLAG_ZERO_ALLOC, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 static struct nouveau_oclass
 nv84_crypt_cclass = {
 	.handle = NV_ENGCTX(CRYPT, 0x84),
 	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv84_crypt_context_ctor,
-		.dtor = _nouveau_crypt_context_dtor,
-		.init = _nouveau_crypt_context_init,
-		.fini = _nouveau_crypt_context_fini,
-		.rd32 = _nouveau_crypt_context_rd32,
-		.wr32 = _nouveau_crypt_context_wr32,
+		.ctor = _nouveau_engctx_ctor,
+		.dtor = _nouveau_engctx_dtor,
+		.init = _nouveau_engctx_init,
+		.fini = _nouveau_engctx_fini,
+		.rd32 = _nouveau_engctx_rd32,
+		.wr32 = _nouveau_engctx_wr32,
 	},
 };
 
@@ -157,7 +135,6 @@ nv84_crypt_intr(struct nouveau_subdev *subdev)
 	nv_wr32(priv, 0x102130, stat);
 	nv_wr32(priv, 0x10200c, 0x10);
 
-	nv50_fb_trap(nouveau_fb(priv), 1);
 	nouveau_engctx_put(engctx);
 }
 
@@ -176,7 +153,8 @@ nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv84_crypt_priv *priv;
 	int ret;
 
-	ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+	ret = nouveau_engine_create(parent, engine, oclass, true,
+				    "PCRYPT", "crypt", &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
@@ -195,7 +173,7 @@ nv84_crypt_init(struct nouveau_object *object)
 	struct nv84_crypt_priv *priv = (void *)object;
 	int ret;
 
-	ret = nouveau_crypt_init(&priv->base);
+	ret = nouveau_engine_init(&priv->base);
 	if (ret)
 		return ret;
 
@@ -210,8 +188,8 @@ nv84_crypt_oclass = {
 	.handle = NV_ENGINE(CRYPT, 0x84),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv84_crypt_ctor,
-		.dtor = _nouveau_crypt_dtor,
+		.dtor = _nouveau_engine_dtor,
 		.init = nv84_crypt_init,
-		.fini = _nouveau_crypt_fini,
+		.fini = _nouveau_engine_fini,
 	},
 };
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
index 9e3876c89b96..21986f3bf0c8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -26,6 +26,7 @@
 #include <core/enum.h>
 #include <core/class.h>
 #include <core/engctx.h>
+#include <core/falcon.h>
 
 #include <subdev/timer.h>
 #include <subdev/fb.h>
@@ -36,11 +37,7 @@
 #include "fuc/nv98.fuc.h"
 
 struct nv98_crypt_priv {
-	struct nouveau_crypt base;
-};
-
-struct nv98_crypt_chan {
-	struct nouveau_crypt_chan base;
+	struct nouveau_falcon base;
 };
 
 /*******************************************************************************
@@ -57,34 +54,16 @@ nv98_crypt_sclass[] = {
  * PCRYPT context
  ******************************************************************************/
 
-static int
-nv98_crypt_context_ctor(struct nouveau_object *parent,
-			struct nouveau_object *engine,
-			struct nouveau_oclass *oclass, void *data, u32 size,
-			struct nouveau_object **pobject)
-{
-	struct nv98_crypt_chan *priv;
-	int ret;
-
-	ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
-					   256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 static struct nouveau_oclass
 nv98_crypt_cclass = {
 	.handle = NV_ENGCTX(CRYPT, 0x98),
 	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv98_crypt_context_ctor,
-		.dtor = _nouveau_crypt_context_dtor,
-		.init = _nouveau_crypt_context_init,
-		.fini = _nouveau_crypt_context_fini,
-		.rd32 = _nouveau_crypt_context_rd32,
-		.wr32 = _nouveau_crypt_context_wr32,
+		.ctor = _nouveau_falcon_context_ctor,
+		.dtor = _nouveau_falcon_context_dtor,
+		.init = _nouveau_falcon_context_init,
+		.fini = _nouveau_falcon_context_fini,
+		.rd32 = _nouveau_falcon_context_rd32,
+		.wr32 = _nouveau_falcon_context_wr32,
 	},
 };
 
@@ -134,7 +113,6 @@ nv98_crypt_intr(struct nouveau_subdev *subdev)
 		nv_wr32(priv, 0x087004, stat);
 	}
 
-	nv50_fb_trap(nouveau_fb(priv), 1);
 	nouveau_engctx_put(engctx);
 }
 
@@ -153,7 +131,8 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv98_crypt_priv *priv;
 	int ret;
 
-	ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+	ret = nouveau_falcon_create(parent, engine, oclass, 0x087000, true,
+				    "PCRYPT", "crypt", &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
@@ -163,36 +142,10 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	nv_engine(priv)->cclass = &nv98_crypt_cclass;
 	nv_engine(priv)->sclass = nv98_crypt_sclass;
 	nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush;
-	return 0;
-}
-
-static int
-nv98_crypt_init(struct nouveau_object *object)
-{
-	struct nv98_crypt_priv *priv = (void *)object;
-	int ret, i;
-
-	ret = nouveau_crypt_init(&priv->base);
-	if (ret)
-		return ret;
-
-	/* wait for exit interrupt to signal */
-	nv_wait(priv, 0x087008, 0x00000010, 0x00000010);
-	nv_wr32(priv, 0x087004, 0x00000010);
-
-	/* upload microcode code and data segments */
-	nv_wr32(priv, 0x087ff8, 0x00100000);
-	for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
-		nv_wr32(priv, 0x087ff4, nv98_pcrypt_code[i]);
-
-	nv_wr32(priv, 0x087ff8, 0x00000000);
-	for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
-		nv_wr32(priv, 0x087ff4, nv98_pcrypt_data[i]);
-
-	/* start it running */
-	nv_wr32(priv, 0x08710c, 0x00000000);
-	nv_wr32(priv, 0x087104, 0x00000000); /* ENTRY */
-	nv_wr32(priv, 0x087100, 0x00000002); /* TRIGGER */
+	nv_falcon(priv)->code.data = nv98_pcrypt_code;
+	nv_falcon(priv)->code.size = sizeof(nv98_pcrypt_code);
+	nv_falcon(priv)->data.data = nv98_pcrypt_data;
+	nv_falcon(priv)->data.size = sizeof(nv98_pcrypt_data);
 	return 0;
 }
 
@@ -201,8 +154,10 @@ nv98_crypt_oclass = {
 	.handle = NV_ENGINE(CRYPT, 0x98),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv98_crypt_ctor,
-		.dtor = _nouveau_crypt_dtor,
-		.init = nv98_crypt_init,
-		.fini = _nouveau_crypt_fini,
+		.dtor = _nouveau_falcon_dtor,
+		.init = _nouveau_falcon_init,
+		.fini = _nouveau_falcon_fini,
+		.rd32 = _nouveau_falcon_rd32,
+		.wr32 = _nouveau_falcon_wr32,
 	},
 };
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
new file mode 100644
index 000000000000..d0817d94454c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+int
+nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+	const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) |
+		         (data & NV50_DISP_DAC_PWR_VSYNC) |
+		         (data & NV50_DISP_DAC_PWR_DATA) |
+		         (data & NV50_DISP_DAC_PWR_STATE);
+	const u32 doff = (or * 0x800);
+	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+	nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
+	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+	return 0;
+}
+
+int
+nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
+{
+	const u32 doff = (or * 0x800);
+	int load = -EINVAL;
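+	/* drive the test level onto the DAC, give the sense circuitry
+	 * ~9.5ms to settle, then read the 3-bit load status from bits
+	 * 29:27 before switching the test mode back off.
+	 */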
+	nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
+	udelay(9500);
+	nv_wr32(priv, 0x61a00c + doff, 0x80000000);
+	load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
+	nv_wr32(priv, 0x61a00c + doff, 0x00000000);
+	return load;
+}
+
+int
+nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR);
+	u32 *data = args;
+	int ret;
+
+	if (size < sizeof(u32))
+		return -EINVAL;
+
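+	/* the low bits of the method select which DAC the operation
+	 * targets (see DAC_MTHD() in nv50.h), the rest the operation
+	 * to perform; LOAD passes its sense result back through the
+	 * first argument word.
+	 */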
+	switch (mthd & ~0x3f) {
+	case NV50_DISP_DAC_PWR:
+		ret = priv->dac.power(priv, or, data[0]);
+		break;
+	case NV50_DISP_DAC_LOAD:
+		ret = priv->dac.sense(priv, or, data[0]);
+		if (ret >= 0) {
+			data[0] = ret;
+			ret = 0;
+		}
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
new file mode 100644
index 000000000000..373dbcc523b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+{
+	const u32 soff = (or * 0x800);
+	int i;
+
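+	/* a valid ELD block is uploaded byte-wise and flagged present;
+	 * a non-NULL but empty buffer appears to leave the audio function
+	 * enabled without an ELD, and NULL disables it entirely.
+	 */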
+	if (data && data[0]) {
+		for (i = 0; i < size; i++)
+			nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
+		nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
+	} else
+	if (data) {
+		nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001);
+	} else {
+		nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000);
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
new file mode 100644
index 000000000000..dc57e24fc1df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+int
+nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+{
+	const u32 soff = (or * 0x030);
+	int i;
+
+	if (data && data[0]) {
+		for (i = 0; i < size; i++)
+			nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
+		nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
+	} else
+	if (data) {
+		nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001);
+	} else {
+		nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000);
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
new file mode 100644
index 000000000000..0d36bdc51417
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+	const u32 hoff = (head * 0x800);
+
+	if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+		nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000);
+		nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
+		nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+		return 0;
+	}
+
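+	/* the packed header words written below follow the CEA-861
+	 * InfoFrame layout (type, version, length in the low three
+	 * bytes): 0x000d0282 is an AVI frame (type 0x82, version 2,
+	 * 13 bytes), 0x000a0184 an audio frame (type 0x84, version 1,
+	 * 10 bytes).
+	 */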
+	/* AVI InfoFrame */
+	nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
+	nv_wr32(priv, 0x616528 + hoff, 0x000d0282);
+	nv_wr32(priv, 0x61652c + hoff, 0x0000006f);
+	nv_wr32(priv, 0x616530 + hoff, 0x00000000);
+	nv_wr32(priv, 0x616534 + hoff, 0x00000000);
+	nv_wr32(priv, 0x616538 + hoff, 0x00000000);
+	nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001);
+
+	/* Audio InfoFrame */
+	nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+	nv_wr32(priv, 0x616508 + hoff, 0x000a0184);
+	nv_wr32(priv, 0x61650c + hoff, 0x00000071);
+	nv_wr32(priv, 0x616510 + hoff, 0x00000000);
+	nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
+
+	/* ??? */
+	nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+	nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+	nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+	/* HDMI_CTRL */
+	nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
new file mode 100644
index 000000000000..f065fc248adf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+	const u32 soff = (or * 0x800);
+
+	if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+		nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000);
+		nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
+		nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+		return 0;
+	}
+
+	/* AVI InfoFrame */
+	nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
+	nv_wr32(priv, 0x61c528 + soff, 0x000d0282);
+	nv_wr32(priv, 0x61c52c + soff, 0x0000006f);
+	nv_wr32(priv, 0x61c530 + soff, 0x00000000);
+	nv_wr32(priv, 0x61c534 + soff, 0x00000000);
+	nv_wr32(priv, 0x61c538 + soff, 0x00000000);
+	nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001);
+
+	/* Audio InfoFrame */
+	nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+	nv_wr32(priv, 0x61c508 + soff, 0x000a0184);
+	nv_wr32(priv, 0x61c50c + soff, 0x00000071);
+	nv_wr32(priv, 0x61c510 + soff, 0x00000000);
+	nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001);
+
+	/* ??? */
+	nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+	nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+	nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+	/* HDMI_CTRL */
+	nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
new file mode 100644
index 000000000000..5151bb261832
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+	const u32 hoff = (head * 0x800);
+
+	if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+		nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
+		nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+		nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+		return 0;
+	}
+
+	/* AVI InfoFrame */
+	nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+	nv_wr32(priv, 0x61671c + hoff, 0x000d0282);
+	nv_wr32(priv, 0x616720 + hoff, 0x0000006f);
+	nv_wr32(priv, 0x616724 + hoff, 0x00000000);
+	nv_wr32(priv, 0x616728 + hoff, 0x00000000);
+	nv_wr32(priv, 0x61672c + hoff, 0x00000000);
+	nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001);
+
+	/* ??? InfoFrame? */
+	nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+	nv_wr32(priv, 0x6167ac + hoff, 0x00000010);
+	nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001);
+
+	/* HDMI_CTRL */
+	nv_mask(priv, 0x616798 + hoff, 0x401f007f, data);
+
+	/* NFI, audio doesn't work without it though.. */
+	nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 15b182c84ce8..0f09af135415 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -22,20 +22,740 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/bar.h>
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/class.h>
 
 #include <engine/software.h>
 #include <engine/disp.h>
 
-struct nv50_disp_priv {
-	struct nouveau_disp base;
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/bar.h>
+#include <subdev/clock.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO channel base class
+ ******************************************************************************/
+
+int
+nv50_disp_chan_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, int chid,
+		       int length, void **pobject)
+{
+	struct nv50_disp_base *base = (void *)parent;
+	struct nv50_disp_chan *chan;
+	int ret;
+
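+	/* each of the fixed EVO channel ids can only have a single user
+	 * at a time; the parent display object tracks this in a bitmask.
+	 */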
+	if (base->chan & (1 << chid))
+		return -EBUSY;
+	base->chan |= (1 << chid);
+
+	ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
+				     (1ULL << NVDEV_ENGINE_DMAOBJ),
+				     length, pobject);
+	chan = *pobject;
+	if (ret)
+		return ret;
+
+	chan->chid = chid;
+	return 0;
+}
+
+void
+nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
+{
+	struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
+	base->chan &= ~(1 << chan->chid);
+	nouveau_namedb_destroy(&chan->base);
+}
+
+u32
+nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_chan *chan = (void *)object;
+	return nv_rd32(priv, 0x640000 + (chan->chid * 0x1000) + addr);
+}
+
+void
+nv50_disp_chan_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_chan *chan = (void *)object;
+	nv_wr32(priv, 0x640000 + (chan->chid * 0x1000) + addr, data);
+}
+
+/*******************************************************************************
+ * EVO DMA channel base class
+ ******************************************************************************/
+
+static int
+nv50_disp_dmac_object_attach(struct nouveau_object *parent,
+			     struct nouveau_object *object, u32 name)
+{
+	struct nv50_disp_base *base = (void *)parent->parent;
+	struct nv50_disp_chan *chan = (void *)parent;
+	u32 addr = nv_gpuobj(object)->node->offset;
+	u32 chid = chan->chid;
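+	/* the hash-table entry packs the channel id twice (high nibble
+	 * and low bits) around the object's instance offset.
+	 */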
+	u32 data = (chid << 28) | (addr << 10) | chid;
+	return nouveau_ramht_insert(base->ramht, chid, name, data);
+}
+
+static void
+nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+{
+	struct nv50_disp_base *base = (void *)parent->parent;
+	nouveau_ramht_remove(base->ramht, cookie);
+}
+
+int
+nv50_disp_dmac_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, u32 pushbuf, int chid,
+		       int length, void **pobject)
+{
+	struct nv50_disp_dmac *dmac;
+	int ret;
+
+	ret = nv50_disp_chan_create_(parent, engine, oclass, chid,
+				     length, pobject);
+	dmac = *pobject;
+	if (ret)
+		return ret;
+
+	dmac->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
+	if (!dmac->pushdma)
+		return -ENOENT;
+
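+	/* the push buffer must be a DMA object (class 0x0002 or 0x003d)
+	 * covering exactly 4KiB; dmac->push packs its 256-byte-aligned
+	 * start address with a target code (0 = VRAM, 3 = PCI) for the
+	 * channel setup registers.
+	 */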
+	switch (nv_mclass(dmac->pushdma)) {
+	case 0x0002:
+	case 0x003d:
+		if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff)
+			return -EINVAL;
+
+		switch (dmac->pushdma->target) {
+		case NV_MEM_TARGET_VRAM:
+			dmac->push = 0x00000000 | dmac->pushdma->start >> 8;
+			break;
+		case NV_MEM_TARGET_PCI_NOSNOOP:
+			dmac->push = 0x00000003 | dmac->pushdma->start >> 8;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void
+nv50_disp_dmac_dtor(struct nouveau_object *object)
+{
+	struct nv50_disp_dmac *dmac = (void *)object;
+	nouveau_object_ref(NULL, (struct nouveau_object **)&dmac->pushdma);
+	nv50_disp_chan_destroy(&dmac->base);
+}
+
+static int
+nv50_disp_dmac_init(struct nouveau_object *object)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_dmac *dmac = (void *)object;
+	int chid = dmac->base.chid;
+	int ret;
+
+	ret = nv50_disp_chan_init(&dmac->base);
+	if (ret)
+		return ret;
+
+	/* enable error reporting */
+	nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00010001 << chid);
+
+	/* initialise channel for dma command submission */
+	nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push);
+	nv_wr32(priv, 0x610208 + (chid * 0x0010), 0x00010000);
+	nv_wr32(priv, 0x61020c + (chid * 0x0010), chid);
+	nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
+	nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
+	nv_wr32(priv, 0x610200 + (chid * 0x0010), 0x00000013);
+
+	/* wait for it to go inactive */
+	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x80000000, 0x00000000)) {
+		nv_error(dmac, "init timeout, 0x%08x\n",
+			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
+nv50_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_dmac *dmac = (void *)object;
+	int chid = dmac->base.chid;
+
+	/* deactivate channel */
+	nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
+	nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
+	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x001e0000, 0x00000000)) {
+		nv_error(dmac, "fini timeout, 0x%08x\n",
+			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
+		if (suspend)
+			return -EBUSY;
+	}
+
+	/* disable error reporting */
+	nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
+
+	return nv50_disp_chan_fini(&dmac->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+static int
+nv50_disp_mast_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_mast_class *args = data;
+	struct nv50_disp_dmac *mast;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+				     0, sizeof(*mast), (void **)&mast);
+	*pobject = nv_object(mast);
+	if (ret)
+		return ret;
+
+	nv_parent(mast)->object_attach = nv50_disp_dmac_object_attach;
+	nv_parent(mast)->object_detach = nv50_disp_dmac_object_detach;
+	return 0;
+}
+
+static int
+nv50_disp_mast_init(struct nouveau_object *object)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_dmac *mast = (void *)object;
+	int ret;
+
+	ret = nv50_disp_chan_init(&mast->base);
+	if (ret)
+		return ret;
+
+	/* enable error reporting */
+	nv_mask(priv, 0x610028, 0x00010001, 0x00010001);
+
+	/* attempt to unstick channel from some unknown state */
+	if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000)
+		nv_mask(priv, 0x610200, 0x00800000, 0x00800000);
+	if ((nv_rd32(priv, 0x610200) & 0x003f0000) == 0x00030000)
+		nv_mask(priv, 0x610200, 0x00600000, 0x00600000);
+
+	/* initialise channel for dma command submission */
+	nv_wr32(priv, 0x610204, mast->push);
+	nv_wr32(priv, 0x610208, 0x00010000);
+	nv_wr32(priv, 0x61020c, 0x00000000);
+	nv_mask(priv, 0x610200, 0x00000010, 0x00000010);
+	nv_wr32(priv, 0x640000, 0x00000000);
+	nv_wr32(priv, 0x610200, 0x01000013);
+
+	/* wait for it to go inactive */
+	if (!nv_wait(priv, 0x610200, 0x80000000, 0x00000000)) {
+		nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610200));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
+nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_dmac *mast = (void *)object;
+
+	/* deactivate channel */
+	nv_mask(priv, 0x610200, 0x00000010, 0x00000000);
+	nv_mask(priv, 0x610200, 0x00000003, 0x00000000);
+	if (!nv_wait(priv, 0x610200, 0x001e0000, 0x00000000)) {
+		nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610200));
+		if (suspend)
+			return -EBUSY;
+	}
+
+	/* disable error reporting */
+	nv_mask(priv, 0x610028, 0x00010001, 0x00000000);
+
+	return nv50_disp_chan_fini(&mast->base, suspend);
+}
+
+struct nouveau_ofuncs
+nv50_disp_mast_ofuncs = {
+	.ctor = nv50_disp_mast_ctor,
+	.dtor = nv50_disp_dmac_dtor,
+	.init = nv50_disp_mast_init,
+	.fini = nv50_disp_mast_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_sync_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_sync_class *args = data;
+	struct nv50_disp_dmac *dmac;
+	int ret;
+
+	if (size < sizeof(*args) || args->head > 1)
+		return -EINVAL;
+
+	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+				     1 + args->head, sizeof(*dmac),
+				     (void **)&dmac);
+	*pobject = nv_object(dmac);
+	if (ret)
+		return ret;
+
+	nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
+	nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
+	return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_sync_ofuncs = {
+	.ctor = nv50_disp_sync_ctor,
+	.dtor = nv50_disp_dmac_dtor,
+	.init = nv50_disp_dmac_init,
+	.fini = nv50_disp_dmac_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_ovly_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_ovly_class *args = data;
+	struct nv50_disp_dmac *dmac;
+	int ret;
+
+	if (size < sizeof(*args) || args->head > 1)
+		return -EINVAL;
+
+	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+				     3 + args->head, sizeof(*dmac),
+				     (void **)&dmac);
+	*pobject = nv_object(dmac);
+	if (ret)
+		return ret;
+
+	nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
+	nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
+	return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_ovly_ofuncs = {
+	.ctor = nv50_disp_ovly_ctor,
+	.dtor = nv50_disp_dmac_dtor,
+	.init = nv50_disp_dmac_init,
+	.fini = nv50_disp_dmac_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO PIO channel base class
+ ******************************************************************************/
+
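+/* PIO channels (cursor and overlay-immediate) have no push buffer; they
+ * are driven directly through their register window, so init/fini only
+ * toggle the channel on and off and poll its status bits.
+ */
+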
+static int
+nv50_disp_pioc_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, int chid,
+		       int length, void **pobject)
+{
+	return nv50_disp_chan_create_(parent, engine, oclass, chid,
+				      length, pobject);
+}
+
+static void
+nv50_disp_pioc_dtor(struct nouveau_object *object)
+{
+	struct nv50_disp_pioc *pioc = (void *)object;
+	nv50_disp_chan_destroy(&pioc->base);
+}
+
+static int
+nv50_disp_pioc_init(struct nouveau_object *object)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_pioc *pioc = (void *)object;
+	int chid = pioc->base.chid;
+	int ret;
+
+	ret = nv50_disp_chan_init(&pioc->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00002000);
+	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00000000, 0x00000000)) {
+		nv_error(pioc, "timeout0: 0x%08x\n",
+			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00000001);
+	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00010000)) {
+		nv_error(pioc, "timeout1: 0x%08x\n",
+			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
+nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_pioc *pioc = (void *)object;
+	int chid = pioc->base.chid;
+
+	nv_mask(priv, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
+	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00000000)) {
+		nv_error(pioc, "timeout: 0x%08x\n",
+			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
+		if (suspend)
+			return -EBUSY;
+	}
+
+	return nv50_disp_chan_fini(&pioc->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO immediate overlay channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_oimm_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_oimm_class *args = data;
+	struct nv50_disp_pioc *pioc;
+	int ret;
+
+	if (size < sizeof(*args) || args->head > 1)
+		return -EINVAL;
+
+	ret = nv50_disp_pioc_create_(parent, engine, oclass, 5 + args->head,
+				     sizeof(*pioc), (void **)&pioc);
+	*pobject = nv_object(pioc);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_oimm_ofuncs = {
+	.ctor = nv50_disp_oimm_ctor,
+	.dtor = nv50_disp_pioc_dtor,
+	.init = nv50_disp_pioc_init,
+	.fini = nv50_disp_pioc_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO cursor channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_curs_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_curs_class *args = data;
+	struct nv50_disp_pioc *pioc;
+	int ret;
+
+	if (size < sizeof(*args) || args->head > 1)
+		return -EINVAL;
+
+	ret = nv50_disp_pioc_create_(parent, engine, oclass, 7 + args->head,
+				     sizeof(*pioc), (void **)&pioc);
+	*pobject = nv_object(pioc);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_curs_ofuncs = {
+	.ctor = nv50_disp_curs_ctor,
+	.dtor = nv50_disp_pioc_dtor,
+	.init = nv50_disp_pioc_init,
+	.fini = nv50_disp_pioc_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static int
+nv50_disp_base_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv = (void *)engine;
+	struct nv50_disp_base *base;
+	int ret;
+
+	ret = nouveau_parent_create(parent, engine, oclass, 0,
+				    priv->sclass, 0, &base);
+	*pobject = nv_object(base);
+	if (ret)
+		return ret;
+
+	return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
+}
+
+static void
+nv50_disp_base_dtor(struct nouveau_object *object)
+{
+	struct nv50_disp_base *base = (void *)object;
+	nouveau_ramht_ref(NULL, &base->ramht);
+	nouveau_parent_destroy(&base->base);
+}
+
+static int
+nv50_disp_base_init(struct nouveau_object *object)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_base *base = (void *)object;
+	int ret, i;
+	u32 tmp;
+
+	ret = nouveau_parent_init(&base->base);
+	if (ret)
+		return ret;
+
+	/* The register-copy sequences below appear to inform EVO of the
+	 * display capabilities, or something similar.  NFI what the
+	 * 0x614004 caps are for..
+	 */
+	tmp = nv_rd32(priv, 0x614004);
+	nv_wr32(priv, 0x610184, tmp);
+
+	/* ... CRTC caps */
+	for (i = 0; i < priv->head.nr; i++) {
+		tmp = nv_rd32(priv, 0x616100 + (i * 0x800));
+		nv_wr32(priv, 0x610190 + (i * 0x10), tmp);
+		tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
+		nv_wr32(priv, 0x610194 + (i * 0x10), tmp);
+		tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
+		nv_wr32(priv, 0x610198 + (i * 0x10), tmp);
+		tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
+		nv_wr32(priv, 0x61019c + (i * 0x10), tmp);
+	}
+
+	/* ... DAC caps */
+	for (i = 0; i < priv->dac.nr; i++) {
+		tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
+		nv_wr32(priv, 0x6101d0 + (i * 0x04), tmp);
+	}
+
+	/* ... SOR caps */
+	for (i = 0; i < priv->sor.nr; i++) {
+		tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
+		nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
+	}
+
+	/* ... EXT caps */
+	for (i = 0; i < 3; i++) {
+		tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
+		nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
+	}
+
+	/* steal display away from vbios, or something like that */
+	if (nv_rd32(priv, 0x610024) & 0x00000100) {
+		nv_wr32(priv, 0x610024, 0x00000100);
+		nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
+		if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
+			nv_error(priv, "timeout acquiring display\n");
+			return -EBUSY;
+		}
+	}
+
+	/* point at display engine memory area (hash table, objects) */
+	nv_wr32(priv, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9);
+
+	/* enable supervisor interrupts, disable everything else */
+	nv_wr32(priv, 0x61002c, 0x00000370);
+	nv_wr32(priv, 0x610028, 0x00000000);
+	return 0;
+}
+
+static int
+nv50_disp_base_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_base *base = (void *)object;
+
+	/* disable all interrupts */
+	nv_wr32(priv, 0x610024, 0x00000000);
+	nv_wr32(priv, 0x610020, 0x00000000);
+
+	return nouveau_parent_fini(&base->base, suspend);
+}
+
+struct nouveau_ofuncs
+nv50_disp_base_ofuncs = {
+	.ctor = nv50_disp_base_ctor,
+	.dtor = nv50_disp_base_dtor,
+	.init = nv50_disp_base_init,
+	.fini = nv50_disp_base_fini,
+};
+
+static struct nouveau_omthds
+nv50_disp_base_omthds[] = {
+	{ SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
+	{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+	{ DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
+	{ DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
+	{},
+};
+
+static struct nouveau_oclass
+nv50_disp_base_oclass[] = {
+	{ NV50_DISP_CLASS, &nv50_disp_base_ofuncs, nv50_disp_base_omthds },
+	{}
 };
 
 static struct nouveau_oclass
 nv50_disp_sclass[] = {
-	{},
+	{ NV50_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+	{ NV50_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+	{ NV50_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+	{ NV50_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+	{ NV50_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+	{}
 };
 
+/*******************************************************************************
+ * Display context, tracks instmem allocation and prevents more than one
+ * client from using the display hardware at any time.
+ ******************************************************************************/
+
+static int
+nv50_disp_data_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv = (void *)engine;
+	struct nouveau_engctx *ectx;
+	int ret = -EBUSY;
+
+	/* no context needed for channel objects... */
+	if (nv_mclass(parent) != NV_DEVICE_CLASS) {
+		atomic_inc(&parent->refcount);
+		*pobject = parent;
+		return 0;
+	}
+
+	/* allocate display hardware to client */
+	mutex_lock(&nv_subdev(priv)->mutex);
+	if (list_empty(&nv_engine(priv)->contexts)) {
+		ret = nouveau_engctx_create(parent, engine, oclass, NULL,
+					    0x10000, 0x10000,
+					    NVOBJ_FLAG_HEAP, &ectx);
+		*pobject = nv_object(ectx);
+	}
+	mutex_unlock(&nv_subdev(priv)->mutex);
+	return ret;
+}
+
+struct nouveau_oclass
+nv50_disp_cclass = {
+	.handle = NV_ENGCTX(DISP, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_disp_data_ctor,
+		.dtor = _nouveau_engctx_dtor,
+		.init = _nouveau_engctx_init,
+		.fini = _nouveau_engctx_fini,
+		.rd32 = _nouveau_engctx_rd32,
+		.wr32 = _nouveau_engctx_wr32,
+	},
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static void
+nv50_disp_intr_error(struct nv50_disp_priv *priv)
+{
+	u32 channels = (nv_rd32(priv, 0x610020) & 0x001f0000) >> 16;
+	u32 addr, data;
+	int chid;
+
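+	/* bits 20:16 of 0x610020 flag a method error on the corresponding
+	 * channel; 0x610080/0x610084 latch the offending method and data,
+	 * and writing 0x90000000 appears to re-arm the latch.
+	 */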
+	for (chid = 0; chid < 5; chid++) {
+		if (!(channels & (1 << chid)))
+			continue;
+
+		nv_wr32(priv, 0x610020, 0x00010000 << chid);
+		addr = nv_rd32(priv, 0x610080 + (chid * 0x08));
+		data = nv_rd32(priv, 0x610084 + (chid * 0x08));
+		nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
+
+		nv_error(priv, "chid %d mthd 0x%04x data 0x%08x 0x%08x\n",
+			 chid, addr & 0xffc, data, addr);
+	}
+}
+
 static void
 nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
 {
@@ -80,30 +800,422 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
 		disp->vblank.notify(disp->vblank.data, crtc);
 }
 
+static u16
+exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
+	    struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+	    struct nvbios_outp *info)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	u16 mask, type, data;
+
+	if (outp < 4) {
+		type = DCB_OUTPUT_ANALOG;
+		mask = 0;
+	} else {
+		outp -= 4;
+		switch (ctrl & 0x00000f00) {
+		case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+		case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+		case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+		case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+		case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+		default:
+			nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+			return 0x0000;
+		}
+	}
+
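+	/* build a DCB match value from the sublink (bits 7:6), the output
+	 * index (low bits) and the head (bits 9:8), then resolve the DCB
+	 * entry and its output script table from the VBIOS.
+	 */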
+	mask  = 0x00c0 & (mask << 6);
+	mask |= 0x0001 << outp;
+	mask |= 0x0100 << head;
+
+	data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
+	if (!data)
+		return 0x0000;
+
+	return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
+}
+
+static bool
+exec_script(struct nv50_disp_priv *priv, int head, int id)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_outp info;
+	struct dcb_output dcb;
+	u8  ver, hdr, cnt, len;
+	u16 data;
+	u32 ctrl = 0x00000000;
+	int i;
+
+	for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+		ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
+
+	if (nv_device(priv)->chipset  < 0x90 ||
+	    nv_device(priv)->chipset == 0x92 ||
+	    nv_device(priv)->chipset == 0xa0) {
+		for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+			ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
+		i += 3;
+	} else {
+		for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+			ctrl = nv_rd32(priv, 0x610798 + (i * 8));
+		i += 3;
+	}
+
+	if (!(ctrl & (1 << head)))
+		return false;
+
+	data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
+	if (data) {
+		struct nvbios_init init = {
+			.subdev = nv_subdev(priv),
+			.bios = bios,
+			.offset = info.script[id],
+			.outp = &dcb,
+			.crtc = head,
+			.execute = 1,
+		};
+
+		return nvbios_exec(&init) == 0;
+	}
+
+	return false;
+}
+
+static u32
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
+	    struct dcb_output *outp)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_outp info1;
+	struct nvbios_ocfg info2;
+	u8  ver, hdr, cnt, len;
+	u16 data, conf;
+	u32 ctrl = 0x00000000;
+	int i;
+
+	for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+		ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
+
+	if (nv_device(priv)->chipset  < 0x90 ||
+	    nv_device(priv)->chipset == 0x92 ||
+	    nv_device(priv)->chipset == 0xa0) {
+		for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+			ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
+		i += 3;
+	} else {
+		for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+			ctrl = nv_rd32(priv, 0x610794 + (i * 8));
+		i += 3;
+	}
+
+	if (!(ctrl & (1 << head)))
+		return 0x0000;
+
+	data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
+	if (!data)
+		return 0x0000;
+
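+	/* derive a config value to match against the output's clock
+	 * comparison table; for TMDS, a pclk at or above the 165MHz
+	 * single-link limit selects the dual-link variant (0x0100).
+	 */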
+	switch (outp->type) {
+	case DCB_OUTPUT_TMDS:
+		conf = (ctrl & 0x00000f00) >> 8;
+		if (pclk >= 165000)
+			conf |= 0x0100;
+		break;
+	case DCB_OUTPUT_LVDS:
+		conf = priv->sor.lvdsconf;
+		break;
+	case DCB_OUTPUT_DP:
+		conf = (ctrl & 0x00000f00) >> 8;
+		break;
+	case DCB_OUTPUT_ANALOG:
+	default:
+		conf = 0x00ff;
+		break;
+	}
+
+	data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
+	if (data) {
+		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+		if (data) {
+			struct nvbios_init init = {
+				.subdev = nv_subdev(priv),
+				.bios = bios,
+				.offset = data,
+				.outp = outp,
+				.crtc = head,
+				.execute = 1,
+			};
+
+			if (nvbios_exec(&init))
+				return 0x0000;
+			return conf;
+		}
+	}
+
+	return 0x0000;
+}
+
 static void
+nv50_disp_intr_unk10(struct nv50_disp_priv *priv, u32 super)
+{
+	int head = ffs((super & 0x00000060) >> 5) - 1;
+	if (head >= 0) {
+		head = ffs((super & 0x00000180) >> 7) - 1;
+		if (head >= 0)
+			exec_script(priv, head, 1);
+	}
+
+	nv_wr32(priv, 0x610024, 0x00000010);
+	nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+static void
+nv50_disp_intr_unk20_dp(struct nv50_disp_priv *priv,
+		        struct dcb_output *outp, u32 pclk)
+{
+	const int link = !(outp->sorconf.link & 1);
+	const int   or = ffs(outp->or) - 1;
+	const u32 soff = (  or * 0x800);
+	const u32 loff = (link * 0x080) + soff;
+	const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8));
+	const u32 symbol = 100000;
+	u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
+	u32 clksor = nv_rd32(priv, 0x614300 + soff);
+	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
+	int TU, VTUi, VTUf, VTUa;
+	u64 link_data_rate, link_ratio, unk;
+	u32 best_diff = 64 * symbol;
+	u32 link_nr, link_bw, bits, r;
+
+	/* calculate packed data rate for each lane */
+	if      (dpctrl > 0x00030000) link_nr = 4;
+	else if (dpctrl > 0x00010000) link_nr = 2;
+	else			      link_nr = 1;
+
+	if (clksor & 0x000c0000)
+		link_bw = 270000;
+	else
+		link_bw = 162000;
+
+	if      ((ctrl & 0xf0000) == 0x60000) bits = 30;
+	else if ((ctrl & 0xf0000) == 0x50000) bits = 24;
+	else                                  bits = 18;
+
+	link_data_rate = (pclk * bits / 8) / link_nr;
+
+	/* calculate ratio of packed data rate to link symbol rate */
+	link_ratio = link_data_rate * symbol;
+	r = do_div(link_ratio, link_bw);
+
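+	/* e.g. a 108MHz pclk at 24bpp over 4 lanes of 2.7Gb/s gives
+	 * link_data_rate = 108000 * 24 / 8 / 4 = 81000 and link_ratio =
+	 * 81000 * 100000 / 270000 = 30000, i.e. 0.30 valid symbols per
+	 * slot; TU = 64 then yields VTUi = 19 with a 0.2-symbol remainder
+	 * that VTUf = 5 cancels exactly.
+	 */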
+	for (TU = 64; TU >= 32; TU--) {
+		/* calculate average number of valid symbols in each TU */
+		u32 tu_valid = link_ratio * TU;
+		u32 calc, diff;
+
+		/* find a hw representation for the fraction.. */
+		VTUi = tu_valid / symbol;
+		calc = VTUi * symbol;
+		diff = tu_valid - calc;
+		if (diff) {
+			if (diff >= (symbol / 2)) {
+				VTUf = symbol / (symbol - diff);
+				if (symbol - (VTUf * diff))
+					VTUf++;
+
+				if (VTUf <= 15) {
+					VTUa  = 1;
+					calc += symbol - (symbol / VTUf);
+				} else {
+					VTUa  = 0;
+					VTUf  = 1;
+					calc += symbol;
+				}
+			} else {
+				VTUa  = 0;
+				VTUf  = min((int)(symbol / diff), 15);
+				calc += symbol / VTUf;
+			}
+
+			diff = calc - tu_valid;
+		} else {
+			/* no remainder, but the hw doesn't like the fractional
+			 * part to be zero.  decrement the integer part and
+			 * have the fraction add a whole symbol back
+			 */
+			VTUa = 0;
+			VTUf = 1;
+			VTUi--;
+		}
+
+		if (diff < best_diff) {
+			best_diff = diff;
+			bestTU = TU;
+			bestVTUa = VTUa;
+			bestVTUf = VTUf;
+			bestVTUi = VTUi;
+			if (diff == 0)
+				break;
+		}
+	}
+
+	if (!bestTU) {
+		nv_error(priv, "unable to find suitable dp config\n");
+		return;
+	}
+
+	/* XXX close to vbios numbers, but not right */
+	unk  = (symbol - link_ratio) * bestTU;
+	unk *= link_ratio;
+	r = do_div(unk, symbol);
+	r = do_div(unk, symbol);
+	unk += 6;
+
+	nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2);
+	nv_mask(priv, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
+						   bestVTUf << 16 |
+						   bestVTUi << 8 | unk);
+}
+
+static void
+nv50_disp_intr_unk20(struct nv50_disp_priv *priv, u32 super)
+{
+	struct dcb_output outp;
+	u32 addr, mask, data;
+	int head;
+
+	/* finish detaching encoder? */
+	head = ffs((super & 0x00000180) >> 7) - 1;
+	if (head >= 0)
+		exec_script(priv, head, 2);
+
+	/* check whether a vpll change is required */
+	head = ffs((super & 0x00000600) >> 9) - 1;
+	if (head >= 0) {
+		u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+		if (pclk) {
+			struct nouveau_clock *clk = nouveau_clock(priv);
+			clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+		}
+
+		nv_mask(priv, 0x614200 + head * 0x800, 0x0000000f, 0x00000000);
+	}
+
+	/* (re)attach the relevant OR to the head */
+	head = ffs((super & 0x00000180) >> 7) - 1;
+	if (head >= 0) {
+		u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+		u32 conf = exec_clkcmp(priv, head, 0, pclk, &outp);
+		if (conf) {
+			if (outp.type == DCB_OUTPUT_ANALOG) {
+				addr = 0x614280 + (ffs(outp.or) - 1) * 0x800;
+				mask = 0xffffffff;
+				data = 0x00000000;
+			} else {
+				if (outp.type == DCB_OUTPUT_DP)
+					nv50_disp_intr_unk20_dp(priv, &outp, pclk);
+				addr = 0x614300 + (ffs(outp.or) - 1) * 0x800;
+				mask = 0x00000707;
+				data = (conf & 0x0100) ? 0x0101 : 0x0000;
+			}
+
+			nv_mask(priv, addr, mask, data);
+		}
+	}
+
+	nv_wr32(priv, 0x610024, 0x00000020);
+	nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+/* If programming a TMDS output on a SOR that can also be configured for
+ * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
+ *
+ * It looks like the VBIOS TMDS scripts make an attempt at this, however,
+ * the VBIOS scripts on at least one board I have only switch it off on
+ * link 0, causing a blank display if the output has previously been
+ * programmed for DisplayPort.
+ */
+static void
+nv50_disp_intr_unk40_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	const int link = !(outp->sorconf.link & 1);
+	const int   or = ffs(outp->or) - 1;
+	const u32 loff = (or * 0x800) + (link * 0x80);
+	const u16 mask = (outp->sorconf.link << 6) | outp->or;
+	u8  ver, hdr;
+
+	if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp))
+		nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
+}
+
+static void
+nv50_disp_intr_unk40(struct nv50_disp_priv *priv, u32 super)
+{
+	int head = ffs((super & 0x00000180) >> 7) - 1;
+	if (head >= 0) {
+		struct dcb_output outp;
+		u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+		if (pclk && exec_clkcmp(priv, head, 1, pclk, &outp)) {
+			if (outp.type == DCB_OUTPUT_TMDS)
+				nv50_disp_intr_unk40_tmds(priv, &outp);
+		}
+	}
+
+	nv_wr32(priv, 0x610024, 0x00000040);
+	nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+static void
+nv50_disp_intr_super(struct nv50_disp_priv *priv, u32 intr1)
+{
+	u32 super = nv_rd32(priv, 0x610030);
+
+	nv_debug(priv, "supervisor 0x%08x 0x%08x\n", intr1, super);
+
+	if (intr1 & 0x00000010)
+		nv50_disp_intr_unk10(priv, super);
+	if (intr1 & 0x00000020)
+		nv50_disp_intr_unk20(priv, super);
+	if (intr1 & 0x00000040)
+		nv50_disp_intr_unk40(priv, super);
+}
+
+void
 nv50_disp_intr(struct nouveau_subdev *subdev)
 {
 	struct nv50_disp_priv *priv = (void *)subdev;
-	u32 stat1 = nv_rd32(priv, 0x610024);
+	u32 intr0 = nv_rd32(priv, 0x610020);
+	u32 intr1 = nv_rd32(priv, 0x610024);
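+	/* 0x610020 carries the per-channel method-error interrupts in
+	 * bits 20:16; 0x610024 has the per-head vblank bits (2:3) and
+	 * the three supervisor stages (bits 4:6) handled below.
+	 */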
 
-	if (stat1 & 0x00000004) {
+	if (intr0 & 0x001f0000) {
+		nv50_disp_intr_error(priv);
+		intr0 &= ~0x001f0000;
+	}
+
+	if (intr1 & 0x00000004) {
 		nv50_disp_intr_vblank(priv, 0);
 		nv_wr32(priv, 0x610024, 0x00000004);
-		stat1 &= ~0x00000004;
+		intr1 &= ~0x00000004;
 	}
 
-	if (stat1 & 0x00000008) {
+	if (intr1 & 0x00000008) {
 		nv50_disp_intr_vblank(priv, 1);
 		nv_wr32(priv, 0x610024, 0x00000008);
-		stat1 &= ~0x00000008;
+		intr1 &= ~0x00000008;
 	}
 
+	if (intr1 & 0x00000070) {
+		nv50_disp_intr_super(priv, intr1);
+		intr1 &= ~0x00000070;
+	}
 }
 
 static int
 nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-		  struct nouveau_oclass *oclass, void *data, u32 size,
-		  struct nouveau_object **pobject)
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
 {
 	struct nv50_disp_priv *priv;
 	int ret;
@@ -114,8 +1226,16 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	nv_engine(priv)->sclass = nv50_disp_sclass;
+	nv_engine(priv)->sclass = nv50_disp_base_oclass;
+	nv_engine(priv)->cclass = &nv50_disp_cclass;
 	nv_subdev(priv)->intr = nv50_disp_intr;
+	priv->sclass = nv50_disp_sclass;
+	priv->head.nr = 2;
+	priv->dac.nr = 3;
+	priv->sor.nr = 2;
+	priv->dac.power = nv50_dac_power;
+	priv->dac.sense = nv50_dac_sense;
+	priv->sor.power = nv50_sor_power;
 
 	INIT_LIST_HEAD(&priv->base.vblank.list);
 	spin_lock_init(&priv->base.vblank.lock);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
new file mode 100644
index 000000000000..a6bb931450f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -0,0 +1,142 @@
+#ifndef __NV50_DISP_H__
+#define __NV50_DISP_H__
+
+#include <core/parent.h>
+#include <core/namedb.h>
+#include <core/ramht.h>
+
+#include <engine/dmaobj.h>
+#include <engine/disp.h>
+
+struct dcb_output;
+
+struct nv50_disp_priv {
+	struct nouveau_disp base;
+	struct nouveau_oclass *sclass;
+	struct {
+		int nr;
+	} head;
+	struct {
+		int nr;
+		int (*power)(struct nv50_disp_priv *, int dac, u32 data);
+		int (*sense)(struct nv50_disp_priv *, int dac, u32 load);
+	} dac;
+	struct {
+		int nr;
+		int (*power)(struct nv50_disp_priv *, int sor, u32 data);
+		int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32);
+		int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32);
+		int (*dp_train_init)(struct nv50_disp_priv *, int sor, int link,
+				     int head, u16 type, u16 mask, u32 data,
+				     struct dcb_output *);
+		int (*dp_train_fini)(struct nv50_disp_priv *, int sor, int link,
+				     int head, u16 type, u16 mask, u32 data,
+				     struct dcb_output *);
+		int (*dp_train)(struct nv50_disp_priv *, int sor, int link,
+				u16 type, u16 mask, u32 data,
+				struct dcb_output *);
+		int (*dp_lnkctl)(struct nv50_disp_priv *, int sor, int link,
+				 int head, u16 type, u16 mask, u32 data,
+				 struct dcb_output *);
+		int (*dp_drvctl)(struct nv50_disp_priv *, int sor, int link,
+				 int lane, u16 type, u16 mask, u32 data,
+				 struct dcb_output *);
+		u32 lvdsconf;
+	} sor;
+};
+
+#define DAC_MTHD(n) (n), (n) + 0x03
+
+int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_dac_power(struct nv50_disp_priv *, int, u32);
+int nv50_dac_sense(struct nv50_disp_priv *, int, u32);
+
+#define SOR_MTHD(n) (n), (n) + 0x3f
+
+int nva3_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+int nvd0_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+
+int nv84_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nva3_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nvd0_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+
+int nv50_sor_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_sor_power(struct nv50_disp_priv *, int, u32);
+
+int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16,
+		           u32, struct dcb_output *);
+int nv94_sor_dp_train_fini(struct nv50_disp_priv *, int, int, int, u16, u16,
+		           u32, struct dcb_output *);
+int nv94_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
+		      struct dcb_output *);
+int nv94_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+		       struct dcb_output *);
+int nv94_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+		       struct dcb_output *);
+
+int nvd0_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
+		      struct dcb_output *);
+int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+		       struct dcb_output *);
+int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+		       struct dcb_output *);
+
+struct nv50_disp_base {
+	struct nouveau_parent base;
+	struct nouveau_ramht *ramht;
+	u32 chan;
+};
+
+struct nv50_disp_chan {
+	struct nouveau_namedb base;
+	int chid;
+};
+
+int  nv50_disp_chan_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, int, int, void **);
+void nv50_disp_chan_destroy(struct nv50_disp_chan *);
+u32  nv50_disp_chan_rd32(struct nouveau_object *, u64);
+void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32);
+
+#define nv50_disp_chan_init(a)                                                 \
+	nouveau_namedb_init(&(a)->base)
+#define nv50_disp_chan_fini(a,b)                                               \
+	nouveau_namedb_fini(&(a)->base, (b))
+
+int  nv50_disp_dmac_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, u32, int, int, void **);
+void nv50_disp_dmac_dtor(struct nouveau_object *);
+
+struct nv50_disp_dmac {
+	struct nv50_disp_chan base;
+	struct nouveau_dmaobj *pushdma;
+	u32 push;
+};
+
+struct nv50_disp_pioc {
+	struct nv50_disp_chan base;
+};
+
+extern struct nouveau_ofuncs nv50_disp_mast_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_sync_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_curs_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
+extern struct nouveau_oclass nv50_disp_cclass;
+void nv50_disp_intr(struct nouveau_subdev *);
+
+extern struct nouveau_omthds nv84_disp_base_omthds[];
+
+extern struct nouveau_omthds nva3_disp_base_omthds[];
+
+extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
+extern struct nouveau_oclass nvd0_disp_cclass;
+void nvd0_disp_intr(struct nouveau_subdev *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
new file mode 100644
index 000000000000..fc84eacdfbec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nv84_disp_sclass[] = {
+	{ NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+	{ NV84_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+	{ NV84_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+	{ NV84_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+	{ NV84_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+	{}
+};
+
+struct nouveau_omthds
+nv84_disp_base_omthds[] = {
+	{ SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
+	{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
+	{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+	{ DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
+	{ DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
+	{},
+};
+
+static struct nouveau_oclass
+nv84_disp_base_oclass[] = {
+	{ NV84_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+	{}
+};
+
+static int
+nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv;
+	int ret;
+
+	ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+				  "display", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nv84_disp_base_oclass;
+	nv_engine(priv)->cclass = &nv50_disp_cclass;
+	nv_subdev(priv)->intr = nv50_disp_intr;
+	priv->sclass = nv84_disp_sclass;
+	priv->head.nr = 2;
+	priv->dac.nr = 3;
+	priv->sor.nr = 2;
+	priv->dac.power = nv50_dac_power;
+	priv->dac.sense = nv50_dac_sense;
+	priv->sor.power = nv50_sor_power;
+	priv->sor.hdmi = nv84_hdmi_ctrl;
+
+	INIT_LIST_HEAD(&priv->base.vblank.list);
+	spin_lock_init(&priv->base.vblank.lock);
+	return 0;
+}
+
+struct nouveau_oclass
+nv84_disp_oclass = {
+	.handle = NV_ENGINE(DISP, 0x82),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_disp_ctor,
+		.dtor = _nouveau_disp_dtor,
+		.init = _nouveau_disp_init,
+		.fini = _nouveau_disp_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
new file mode 100644
index 000000000000..ba9dfd4669a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nv94_disp_sclass[] = {
+	{ NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+	{ NV94_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+	{ NV94_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+	{ NV94_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+	{ NV94_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+	{}
+};
+
+static struct nouveau_omthds
+nv94_disp_base_omthds[] = {
+	{ SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
+	{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
+	{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+	{ SOR_MTHD(NV94_DISP_SOR_DP_TRAIN)    , nv50_sor_mthd },
+	{ SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL)   , nv50_sor_mthd },
+	{ SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
+	{ SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
+	{ SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
+	{ SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
+	{ DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
+	{ DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
+	{},
+};
+
+static struct nouveau_oclass
+nv94_disp_base_oclass[] = {
+	{ NV94_DISP_CLASS, &nv50_disp_base_ofuncs, nv94_disp_base_omthds },
+	{}
+};
+
+static int
+nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv;
+	int ret;
+
+	ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+				  "display", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nv94_disp_base_oclass;
+	nv_engine(priv)->cclass = &nv50_disp_cclass;
+	nv_subdev(priv)->intr = nv50_disp_intr;
+	priv->sclass = nv94_disp_sclass;
+	priv->head.nr = 2;
+	priv->dac.nr = 3;
+	priv->sor.nr = 4;
+	priv->dac.power = nv50_dac_power;
+	priv->dac.sense = nv50_dac_sense;
+	priv->sor.power = nv50_sor_power;
+	priv->sor.hdmi = nv84_hdmi_ctrl;
+	priv->sor.dp_train = nv94_sor_dp_train;
+	priv->sor.dp_train_init = nv94_sor_dp_train_init;
+	priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+	priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
+	priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
+
+	INIT_LIST_HEAD(&priv->base.vblank.list);
+	spin_lock_init(&priv->base.vblank.lock);
+	return 0;
+}
+
+struct nouveau_oclass
+nv94_disp_oclass = {
+	.handle = NV_ENGINE(DISP, 0x88),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv94_disp_ctor,
+		.dtor = _nouveau_disp_dtor,
+		.init = _nouveau_disp_init,
+		.fini = _nouveau_disp_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
new file mode 100644
index 000000000000..5d63902cdeda
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nva0_disp_sclass[] = {
+	{ NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+	{ NVA0_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+	{ NVA0_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+	{ NVA0_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+	{ NVA0_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+	{}
+};
+
+static struct nouveau_oclass
+nva0_disp_base_oclass[] = {
+	{ NVA0_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+	{}
+};
+
+static int
+nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv;
+	int ret;
+
+	ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+				  "display", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nva0_disp_base_oclass;
+	nv_engine(priv)->cclass = &nv50_disp_cclass;
+	nv_subdev(priv)->intr = nv50_disp_intr;
+	priv->sclass = nva0_disp_sclass;
+	priv->head.nr = 2;
+	priv->dac.nr = 3;
+	priv->sor.nr = 2;
+	priv->dac.power = nv50_dac_power;
+	priv->dac.sense = nv50_dac_sense;
+	priv->sor.power = nv50_sor_power;
+	priv->sor.hdmi = nv84_hdmi_ctrl;
+
+	INIT_LIST_HEAD(&priv->base.vblank.list);
+	spin_lock_init(&priv->base.vblank.lock);
+	return 0;
+}
+
+struct nouveau_oclass
+nva0_disp_oclass = {
+	.handle = NV_ENGINE(DISP, 0x83),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nva0_disp_ctor,
+		.dtor = _nouveau_disp_dtor,
+		.init = _nouveau_disp_init,
+		.fini = _nouveau_disp_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
new file mode 100644
index 000000000000..e9192ca389fa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nva3_disp_sclass[] = {
+	{ NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+	{ NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+	{ NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+	{ NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+	{ NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+	{}
+};
+
+struct nouveau_omthds
+nva3_disp_base_omthds[] = {
+	{ SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
+	{ SOR_MTHD(NVA3_DISP_SOR_HDA_ELD)     , nv50_sor_mthd },
+	{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
+	{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+	{ SOR_MTHD(NV94_DISP_SOR_DP_TRAIN)    , nv50_sor_mthd },
+	{ SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL)   , nv50_sor_mthd },
+	{ SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
+	{ SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
+	{ SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
+	{ SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
+	{ DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
+	{ DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
+	{},
+};
+
+static struct nouveau_oclass
+nva3_disp_base_oclass[] = {
+	{ NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds },
+	{}
+};
+
+static int
+nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv;
+	int ret;
+
+	ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+				  "display", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nva3_disp_base_oclass;
+	nv_engine(priv)->cclass = &nv50_disp_cclass;
+	nv_subdev(priv)->intr = nv50_disp_intr;
+	priv->sclass = nva3_disp_sclass;
+	priv->head.nr = 2;
+	priv->dac.nr = 3;
+	priv->sor.nr = 4;
+	priv->dac.power = nv50_dac_power;
+	priv->dac.sense = nv50_dac_sense;
+	priv->sor.power = nv50_sor_power;
+	priv->sor.hda_eld = nva3_hda_eld;
+	priv->sor.hdmi = nva3_hdmi_ctrl;
+	priv->sor.dp_train = nv94_sor_dp_train;
+	priv->sor.dp_train_init = nv94_sor_dp_train_init;
+	priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+	priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
+	priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
+
+	INIT_LIST_HEAD(&priv->base.vblank.list);
+	spin_lock_init(&priv->base.vblank.lock);
+	return 0;
+}
+
+struct nouveau_oclass
+nva3_disp_oclass = {
+	.handle = NV_ENGINE(DISP, 0x85),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nva3_disp_ctor,
+		.dtor = _nouveau_disp_dtor,
+		.init = _nouveau_disp_init,
+		.fini = _nouveau_disp_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index d93efbcf75b8..9e38ebff5fb3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -22,22 +22,808 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/bar.h>
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/class.h>
 
 #include <engine/software.h>
 #include <engine/disp.h>
 
-struct nvd0_disp_priv {
-	struct nouveau_disp base;
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/bar.h>
+#include <subdev/clock.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO DMA channel base class
+ ******************************************************************************/
+
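+/* Objects are attached to an EVO channel by inserting an entry into the
+ * display engine's hash table.  The packing below is inferred from
+ * observed behaviour rather than documentation: the channel id occupies
+ * the top bits, the object's context offset the middle, and bit 0
+ * marks the entry valid.
+ */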
+static int
+nvd0_disp_dmac_object_attach(struct nouveau_object *parent,
+			     struct nouveau_object *object, u32 name)
+{
+	struct nv50_disp_base *base = (void *)parent->parent;
+	struct nv50_disp_chan *chan = (void *)parent;
+	u32 addr = nv_gpuobj(object)->node->offset;
+	u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
+	return nouveau_ramht_insert(base->ramht, chan->chid, name, data);
+}
+
+static void
+nvd0_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+{
+	struct nv50_disp_base *base = (void *)parent->parent;
+	nouveau_ramht_remove(base->ramht, cookie);
+}
+
+static int
+nvd0_disp_dmac_init(struct nouveau_object *object)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_dmac *dmac = (void *)object;
+	int chid = dmac->base.chid;
+	int ret;
+
+	ret = nv50_disp_chan_init(&dmac->base);
+	if (ret)
+		return ret;
+
+	/* enable error reporting */
+	nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
+	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+	/* initialise channel for dma command submission */
+	nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
+	nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
+	nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
+	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+	nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
+	nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);
+
+	/* wait for it to go inactive */
+	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
+		nv_error(dmac, "init: 0x%08x\n",
+			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
+nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_dmac *dmac = (void *)object;
+	int chid = dmac->base.chid;
+
+	/* deactivate channel */
+	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
+	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
+	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
+		nv_error(dmac, "fini: 0x%08x\n",
+			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
+		if (suspend)
+			return -EBUSY;
+	}
+
+	/* disable error reporting */
+	nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
+	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
+
+	return nv50_disp_chan_fini(&dmac->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+static int
+nvd0_disp_mast_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_mast_class *args = data;
+	struct nv50_disp_dmac *mast;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+				     0, sizeof(*mast), (void **)&mast);
+	*pobject = nv_object(mast);
+	if (ret)
+		return ret;
+
+	nv_parent(mast)->object_attach = nvd0_disp_dmac_object_attach;
+	nv_parent(mast)->object_detach = nvd0_disp_dmac_object_detach;
+	return 0;
+}
+
+static int
+nvd0_disp_mast_init(struct nouveau_object *object)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_dmac *mast = (void *)object;
+	int ret;
+
+	ret = nv50_disp_chan_init(&mast->base);
+	if (ret)
+		return ret;
+
+	/* enable error reporting */
+	nv_mask(priv, 0x610090, 0x00000001, 0x00000001);
+	nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
+
+	/* initialise channel for dma command submission */
+	nv_wr32(priv, 0x610494, mast->push);
+	nv_wr32(priv, 0x610498, 0x00010000);
+	nv_wr32(priv, 0x61049c, 0x00000001);
+	nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
+	nv_wr32(priv, 0x640000, 0x00000000);
+	nv_wr32(priv, 0x610490, 0x01000013);
+
+	/* wait for it to go inactive */
+	if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
+		nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
+nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_dmac *mast = (void *)object;
+
+	/* deactivate channel */
+	nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
+	nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
+	if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
+		nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
+		if (suspend)
+			return -EBUSY;
+	}
+
+	/* disable error reporting */
+	nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
+	nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
+
+	return nv50_disp_chan_fini(&mast->base, suspend);
+}
+
+struct nouveau_ofuncs
+nvd0_disp_mast_ofuncs = {
+	.ctor = nvd0_disp_mast_ctor,
+	.dtor = nv50_disp_dmac_dtor,
+	.init = nvd0_disp_mast_init,
+	.fini = nvd0_disp_mast_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_sync_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_sync_class *args = data;
+	struct nv50_disp_priv *priv = (void *)engine;
+	struct nv50_disp_dmac *dmac;
+	int ret;
+
+	if (size < sizeof(*args) || args->head >= priv->head.nr)
+		return -EINVAL;
+
+	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+				     1 + args->head, sizeof(*dmac),
+				     (void **)&dmac);
+	*pobject = nv_object(dmac);
+	if (ret)
+		return ret;
+
+	nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
+	nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
+	return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_sync_ofuncs = {
+	.ctor = nvd0_disp_sync_ctor,
+	.dtor = nv50_disp_dmac_dtor,
+	.init = nvd0_disp_dmac_init,
+	.fini = nvd0_disp_dmac_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_ovly_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_ovly_class *args = data;
+	struct nv50_disp_priv *priv = (void *)engine;
+	struct nv50_disp_dmac *dmac;
+	int ret;
+
+	if (size < sizeof(*args) || args->head >= priv->head.nr)
+		return -EINVAL;
+
+	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+				     5 + args->head, sizeof(*dmac),
+				     (void **)&dmac);
+	*pobject = nv_object(dmac);
+	if (ret)
+		return ret;
+
+	nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
+	nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
+	return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_ovly_ofuncs = {
+	.ctor = nvd0_disp_ovly_ctor,
+	.dtor = nv50_disp_dmac_dtor,
+	.init = nvd0_disp_dmac_init,
+	.fini = nvd0_disp_dmac_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO PIO channel base class
+ ******************************************************************************/
+
+static int
+nvd0_disp_pioc_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, int chid,
+		       int length, void **pobject)
+{
+	return nv50_disp_chan_create_(parent, engine, oclass, chid,
+				      length, pobject);
+}
+
+static void
+nvd0_disp_pioc_dtor(struct nouveau_object *object)
+{
+	struct nv50_disp_pioc *pioc = (void *)object;
+	nv50_disp_chan_destroy(&pioc->base);
+}
+
+static int
+nvd0_disp_pioc_init(struct nouveau_object *object)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_pioc *pioc = (void *)object;
+	int chid = pioc->base.chid;
+	int ret;
+
+	ret = nv50_disp_chan_init(&pioc->base);
+	if (ret)
+		return ret;
+
+	/* enable error reporting */
+	nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
+	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+	/* activate channel */
+	nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
+	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
+		nv_error(pioc, "init: 0x%08x\n",
+			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
+nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_pioc *pioc = (void *)object;
+	int chid = pioc->base.chid;
+
+	nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
+	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
+		nv_error(pioc, "timeout: 0x%08x\n",
+			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
+		if (suspend)
+			return -EBUSY;
+	}
+
+	/* disable error reporting */
+	nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
+	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
+
+	return nv50_disp_chan_fini(&pioc->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO immediate overlay channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_oimm_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_oimm_class *args = data;
+	struct nv50_disp_priv *priv = (void *)engine;
+	struct nv50_disp_pioc *pioc;
+	int ret;
+
+	if (size < sizeof(*args) || args->head >= priv->head.nr)
+		return -EINVAL;
+
+	ret = nvd0_disp_pioc_create_(parent, engine, oclass, 9 + args->head,
+				     sizeof(*pioc), (void **)&pioc);
+	*pobject = nv_object(pioc);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_oimm_ofuncs = {
+	.ctor = nvd0_disp_oimm_ctor,
+	.dtor = nvd0_disp_pioc_dtor,
+	.init = nvd0_disp_pioc_init,
+	.fini = nvd0_disp_pioc_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO cursor channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_curs_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_curs_class *args = data;
+	struct nv50_disp_priv *priv = (void *)engine;
+	struct nv50_disp_pioc *pioc;
+	int ret;
+
+	if (size < sizeof(*args) || args->head >= priv->head.nr)
+		return -EINVAL;
+
+	ret = nvd0_disp_pioc_create_(parent, engine, oclass, 13 + args->head,
+				     sizeof(*pioc), (void **)&pioc);
+	*pobject = nv_object(pioc);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_curs_ofuncs = {
+	.ctor = nvd0_disp_curs_ctor,
+	.dtor = nvd0_disp_pioc_dtor,
+	.init = nvd0_disp_pioc_init,
+	.fini = nvd0_disp_pioc_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static int
+nvd0_disp_base_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv = (void *)engine;
+	struct nv50_disp_base *base;
+	int ret;
+
+	ret = nouveau_parent_create(parent, engine, oclass, 0,
+				    priv->sclass, 0, &base);
+	*pobject = nv_object(base);
+	if (ret)
+		return ret;
+
+	return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
+}
+
+static void
+nvd0_disp_base_dtor(struct nouveau_object *object)
+{
+	struct nv50_disp_base *base = (void *)object;
+	nouveau_ramht_ref(NULL, &base->ramht);
+	nouveau_parent_destroy(&base->base);
+}
+
+static int
+nvd0_disp_base_init(struct nouveau_object *object)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_base *base = (void *)object;
+	int ret, i;
+	u32 tmp;
+
+	ret = nouveau_parent_init(&base->base);
+	if (ret)
+		return ret;
+
+	/* The code segments below, which copy values from one register to
+	 * another, appear to inform EVO of the display capabilities or
+	 * something similar.
+	 */
+
+	/* ... CRTC caps */
+	for (i = 0; i < priv->head.nr; i++) {
+		tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
+		nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
+		tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
+		nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
+		tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
+		nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
+	}
+
+	/* ... DAC caps */
+	for (i = 0; i < priv->dac.nr; i++) {
+		tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
+		nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
+	}
+
+	/* ... SOR caps */
+	for (i = 0; i < priv->sor.nr; i++) {
+		tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
+		nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
+	}
+
+	/* steal display away from vbios, or something like that */
+	if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
+		nv_wr32(priv, 0x6100ac, 0x00000100);
+		nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
+		if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
+			nv_error(priv, "timeout acquiring display\n");
+			return -EBUSY;
+		}
+	}
+
+	/* point at display engine memory area (hash table, objects) */
+	nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);
+
+	/* enable supervisor interrupts, disable everything else */
+	nv_wr32(priv, 0x610090, 0x00000000);
+	nv_wr32(priv, 0x6100a0, 0x00000000);
+	nv_wr32(priv, 0x6100b0, 0x00000307);
+
+	return 0;
+}
+
+static int
+nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_base *base = (void *)object;
+
+	/* disable all interrupts */
+	nv_wr32(priv, 0x6100b0, 0x00000000);
+
+	return nouveau_parent_fini(&base->base, suspend);
+}
+
+struct nouveau_ofuncs
+nvd0_disp_base_ofuncs = {
+	.ctor = nvd0_disp_base_ctor,
+	.dtor = nvd0_disp_base_dtor,
+	.init = nvd0_disp_base_init,
+	.fini = nvd0_disp_base_fini,
+};
+
+static struct nouveau_oclass
+nvd0_disp_base_oclass[] = {
+	{ NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+	{}
 };
 
 static struct nouveau_oclass
 nvd0_disp_sclass[] = {
-	{},
+	{ NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+	{ NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+	{ NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+	{ NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+	{ NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+	{}
 };
 
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static u16
+exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
+	    struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+	    struct nvbios_outp *info)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	u16 mask, type, data;
+
+	if (outp < 4) {
+		type = DCB_OUTPUT_ANALOG;
+		mask = 0;
+	} else {
+		outp -= 4;
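+		/* bits 11:8 of the supervisor control word appear to
+		 * select the SOR protocol; the mapping below is inferred
+		 * from traces, hence the error on anything unrecognised
+		 */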
+		switch (ctrl & 0x00000f00) {
+		case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+		case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+		case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+		case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+		case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+		default:
+			nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+			return 0x0000;
+		}
+		dcb->sorconf.link = mask;
+	}
+
+	mask  = 0x00c0 & (mask << 6);
+	mask |= 0x0001 << outp;
+	mask |= 0x0100 << head;
+
+	data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
+	if (!data)
+		return 0x0000;
+
+	return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
+}
+
+static bool
+exec_script(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, int id)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_outp info;
+	struct dcb_output dcb;
+	u8  ver, hdr, cnt, len;
+	u16 data;
+
+	data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
+	if (data) {
+		struct nvbios_init init = {
+			.subdev = nv_subdev(priv),
+			.bios = bios,
+			.offset = info.script[id],
+			.outp = &dcb,
+			.crtc = head,
+			.execute = 1,
+		};
+
+		return nvbios_exec(&init) == 0;
+	}
+
+	return false;
+}
+
+static u32
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int outp,
+	    u32 ctrl, int id, u32 pclk)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_outp info1;
+	struct nvbios_ocfg info2;
+	struct dcb_output dcb;
+	u8  ver, hdr, cnt, len;
+	u16 data, conf;
+
+	data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info1);
+	if (data == 0x0000)
+		return 0x0000;
+
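+	/* derive the configuration value used to pick the matching clock
+	 * script from the VBIOS; the per-type encodings below are a best
+	 * guess based on traces
+	 */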
+	switch (dcb.type) {
+	case DCB_OUTPUT_TMDS:
+		conf = (ctrl & 0x00000f00) >> 8;
+		if (pclk >= 165000)
+			conf |= 0x0100;
+		break;
+	case DCB_OUTPUT_LVDS:
+		conf = priv->sor.lvdsconf;
+		break;
+	case DCB_OUTPUT_DP:
+		conf = (ctrl & 0x00000f00) >> 8;
+		break;
+	case DCB_OUTPUT_ANALOG:
+	default:
+		conf = 0x00ff;
+		break;
+	}
+
+	data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
+	if (data) {
+		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+		if (data) {
+			struct nvbios_init init = {
+				.subdev = nv_subdev(priv),
+				.bios = bios,
+				.offset = data,
+				.outp = &dcb,
+				.crtc = head,
+				.execute = 1,
+			};
+
+			if (nvbios_exec(&init))
+				return 0x0000;
+			return conf;
+		}
+	}
+
+	return 0x0000;
+}
+
 static void
-nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
+nvd0_display_unk1_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+	int i;
+
+	for (i = 0; mask && i < 8; i++) {
+		u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
+		if (mcc & (1 << head))
+			exec_script(priv, head, i, mcc, 1);
+	}
+
+	nv_wr32(priv, 0x6101d4, 0x00000000);
+	nv_wr32(priv, 0x6109d4, 0x00000000);
+	nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
+static void
+nvd0_display_unk2_calc_tu(struct nv50_disp_priv *priv, int head, int or)
+{
+	const u32 ctrl = nv_rd32(priv, 0x660200 + (or   * 0x020));
+	const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
+	const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+	const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
+	const u32 hoff = (head * 0x800);
+	const u32 soff = (  or * 0x800);
+	const u32 loff = (link * 0x080) + soff;
+	const u32 symbol = 100000;
+	const u32 TU = 64;
+	u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
+	u32 clksor = nv_rd32(priv, 0x612300 + soff);
+	u32 datarate, link_nr, link_bw, bits;
+	u64 ratio, value;
+
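+	/* a sketch of the arithmetic that follows, inferred from the DP
+	 * spec rather than documentation: "ratio" is the link occupancy
+	 * (pixel datarate over total link bandwidth) in fixed point with
+	 * scale factor "symbol", and "value" derives from it the number
+	 * of active symbols per 64-symbol transfer unit before being
+	 * written, with an enable bit, to the head's TU register
+	 */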
+	if      ((conf & 0x3c0) == 0x180) bits = 30;
+	else if ((conf & 0x3c0) == 0x140) bits = 24;
+	else                              bits = 18;
+	datarate = (pclk * bits) / 8;
+
+	if      (dpctrl > 0x00030000) link_nr = 4;
+	else if (dpctrl > 0x00010000) link_nr = 2;
+	else			      link_nr = 1;
+
+	link_bw  = (clksor & 0x007c0000) >> 18;
+	link_bw *= 27000;
+
+	ratio  = datarate;
+	ratio *= symbol;
+	do_div(ratio, link_nr * link_bw);
+
+	value  = (symbol - ratio) * TU;
+	value *= ratio;
+	do_div(value, symbol);
+	do_div(value, symbol);
+
+	value += 5;
+	value |= 0x08000000;
+
+	nv_wr32(priv, 0x616610 + hoff, value);
+}
+
+static void
+nvd0_display_unk2_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+	u32 pclk;
+	int i;
+
+	for (i = 0; mask && i < 8; i++) {
+		u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
+		if (mcc & (1 << head))
+			exec_script(priv, head, i, mcc, 2);
+	}
+
+	pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+	nv_debug(priv, "head %d pclk %d mask 0x%08x\n", head, pclk, mask);
+	if (pclk && (mask & 0x00010000)) {
+		struct nouveau_clock *clk = nouveau_clock(priv);
+		clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+	}
+
+	nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
+
+	for (i = 0; mask && i < 8; i++) {
+		u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20)), cfg;
+		if (mcp & (1 << head)) {
+			if ((cfg = exec_clkcmp(priv, head, i, mcp, 0, pclk))) {
+				u32 addr, mask, data = 0x00000000;
+				if (i < 4) {
+					addr = 0x612280 + ((i - 0) * 0x800);
+					mask = 0xffffffff;
+				} else {
+					switch (mcp & 0x00000f00) {
+					case 0x00000800:
+					case 0x00000900:
+						nvd0_display_unk2_calc_tu(priv, head, i - 4);
+						break;
+					default:
+						break;
+					}
+
+					addr = 0x612300 + ((i - 4) * 0x800);
+					mask = 0x00000707;
+					if (cfg & 0x00000100)
+						data = 0x00000101;
+				}
+				nv_mask(priv, addr, mask, data);
+			}
+			break;
+		}
+	}
+
+	nv_wr32(priv, 0x6101d4, 0x00000000);
+	nv_wr32(priv, 0x6109d4, 0x00000000);
+	nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
+static void
+nvd0_display_unk4_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+	int pclk, i;
+
+	pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+
+	for (i = 0; mask && i < 8; i++) {
+		u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20));
+		if (mcp & (1 << head))
+			exec_clkcmp(priv, head, i, mcp, 1, pclk);
+	}
+
+	nv_wr32(priv, 0x6101d4, 0x00000000);
+	nv_wr32(priv, 0x6109d4, 0x00000000);
+	nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
+static void
+nvd0_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
 {
 	struct nouveau_bar *bar = nouveau_bar(priv);
 	struct nouveau_disp *disp = &priv->base;
@@ -65,14 +851,71 @@ nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
 		disp->vblank.notify(disp->vblank.data, crtc);
 }
 
-static void
+void
 nvd0_disp_intr(struct nouveau_subdev *subdev)
 {
-	struct nvd0_disp_priv *priv = (void *)subdev;
+	struct nv50_disp_priv *priv = (void *)subdev;
 	u32 intr = nv_rd32(priv, 0x610088);
 	int i;
 
-	for (i = 0; i < 4; i++) {
+	if (intr & 0x00000001) {
+		u32 stat = nv_rd32(priv, 0x61008c);
+		nv_wr32(priv, 0x61008c, stat);
+		intr &= ~0x00000001;
+	}
+
+	if (intr & 0x00000002) {
+		u32 stat = nv_rd32(priv, 0x61009c);
+		int chid = ffs(stat) - 1;
+		if (chid >= 0) {
+			u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
+			u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
+			u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
+
+			nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
+				       "0x%08x 0x%08x\n",
+				 chid, (mthd & 0x0000ffc), data, mthd, unkn);
+			nv_wr32(priv, 0x61009c, (1 << chid));
+			nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
+		}
+
+		intr &= ~0x00000002;
+	}
+
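+	/* supervisor interrupt: the display engine appears to raise this
+	 * in three stages during a modeset, the pending stage being read
+	 * from 0x6100ac; each handler below runs the relevant VBIOS
+	 * scripts and then acknowledges its stage
+	 */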
+	if (intr & 0x00100000) {
+		u32 stat = nv_rd32(priv, 0x6100ac);
+		u32 mask = 0, crtc = ~0;
+
+		while (!mask && ++crtc < priv->head.nr)
+			mask = nv_rd32(priv, 0x6101d4 + (crtc * 0x800));
+
+		if (stat & 0x00000001) {
+			nv_wr32(priv, 0x6100ac, 0x00000001);
+			nvd0_display_unk1_handler(priv, crtc, mask);
+			stat &= ~0x00000001;
+		}
+
+		if (stat & 0x00000002) {
+			nv_wr32(priv, 0x6100ac, 0x00000002);
+			nvd0_display_unk2_handler(priv, crtc, mask);
+			stat &= ~0x00000002;
+		}
+
+		if (stat & 0x00000004) {
+			nv_wr32(priv, 0x6100ac, 0x00000004);
+			nvd0_display_unk4_handler(priv, crtc, mask);
+			stat &= ~0x00000004;
+		}
+
+		if (stat) {
+			nv_info(priv, "unknown intr24 0x%08x\n", stat);
+			nv_wr32(priv, 0x6100ac, stat);
+		}
+
+		intr &= ~0x00100000;
+	}
+
+	for (i = 0; i < priv->head.nr; i++) {
 		u32 mask = 0x01000000 << i;
 		if (mask & intr) {
 			u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
@@ -86,10 +929,10 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
 
 static int
 nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-		  struct nouveau_oclass *oclass, void *data, u32 size,
-		  struct nouveau_object **pobject)
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
 {
-	struct nvd0_disp_priv *priv;
+	struct nv50_disp_priv *priv;
 	int ret;
 
 	ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
@@ -98,8 +941,23 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	nv_engine(priv)->sclass = nvd0_disp_sclass;
+	nv_engine(priv)->sclass = nvd0_disp_base_oclass;
+	nv_engine(priv)->cclass = &nv50_disp_cclass;
 	nv_subdev(priv)->intr = nvd0_disp_intr;
+	priv->sclass = nvd0_disp_sclass;
+	priv->head.nr = nv_rd32(priv, 0x022448);
+	priv->dac.nr = 3;
+	priv->sor.nr = 4;
+	priv->dac.power = nv50_dac_power;
+	priv->dac.sense = nv50_dac_sense;
+	priv->sor.power = nv50_sor_power;
+	priv->sor.hda_eld = nvd0_hda_eld;
+	priv->sor.hdmi = nvd0_hdmi_ctrl;
+	priv->sor.dp_train = nvd0_sor_dp_train;
+	priv->sor.dp_train_init = nv94_sor_dp_train_init;
+	priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+	priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
+	priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
 
 	INIT_LIST_HEAD(&priv->base.vblank.list);
 	spin_lock_init(&priv->base.vblank.lock);
@@ -108,7 +966,7 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 
 struct nouveau_oclass
 nvd0_disp_oclass = {
-	.handle = NV_ENGINE(DISP, 0xd0),
+	.handle = NV_ENGINE(DISP, 0x90),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nvd0_disp_ctor,
 		.dtor = _nouveau_disp_dtor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
new file mode 100644
index 000000000000..259537c4587e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nve0_disp_sclass[] = {
+	{ NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+	{ NVE0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+	{ NVE0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+	{ NVE0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+	{ NVE0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+	{}
+};
+
+static struct nouveau_oclass
+nve0_disp_base_oclass[] = {
+	{ NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+	{}
+};
+
+static int
+nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv;
+	int ret;
+
+	ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+				  "display", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nve0_disp_base_oclass;
+	nv_engine(priv)->cclass = &nv50_disp_cclass;
+	nv_subdev(priv)->intr = nvd0_disp_intr;
+	priv->sclass = nve0_disp_sclass;
+	priv->head.nr = nv_rd32(priv, 0x022448);
+	priv->dac.nr = 3;
+	priv->sor.nr = 4;
+	priv->dac.power = nv50_dac_power;
+	priv->dac.sense = nv50_dac_sense;
+	priv->sor.power = nv50_sor_power;
+	priv->sor.hda_eld = nvd0_hda_eld;
+	priv->sor.hdmi = nvd0_hdmi_ctrl;
+	priv->sor.dp_train = nvd0_sor_dp_train;
+	priv->sor.dp_train_init = nv94_sor_dp_train_init;
+	priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+	priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
+	priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
+
+	INIT_LIST_HEAD(&priv->base.vblank.list);
+	spin_lock_init(&priv->base.vblank.lock);
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_disp_oclass = {
+	.handle = NV_ENGINE(DISP, 0x91),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_disp_ctor,
+		.dtor = _nouveau_disp_dtor,
+		.init = _nouveau_disp_init,
+		.fini = _nouveau_disp_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
new file mode 100644
index 000000000000..39b6b67732d0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+int
+nv50_sor_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+	const u32 stat = data & NV50_DISP_SOR_PWR_STATE;
+	const u32 soff = (or * 0x800);
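+	/* a guess at the protocol here: wait for any pending request to
+	 * complete, submit the new power state, then wait for both the
+	 * request and the SOR itself to settle
+	 */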
+	nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
+	nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
+	nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
+	nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000);
+	return 0;
+}
+
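+/* A sketch of the method-offset encoding, mirroring the
+ * NV50_DISP_SOR_MTHD_* masks rather than official documentation: the
+ * shifts below (12, 3, 2 and 0) extract the DCB output type, head,
+ * link and output index respectively.
+ */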
+int
+nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
+	const u8  head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
+	const u8  link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
+	const u8    or = (mthd & NV50_DISP_SOR_MTHD_OR);
+	const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
+	struct dcb_output outp;
+	u8  ver, hdr;
+	u32 data;
+	int ret = -EINVAL;
+
+	if (size < sizeof(u32))
+		return -EINVAL;
+	data = *(u32 *)args;
+
+	if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp))
+		return -ENODEV;
+
+	switch (mthd & ~0x3f) {
+	case NV50_DISP_SOR_PWR:
+		ret = priv->sor.power(priv, or, data);
+		break;
+	case NVA3_DISP_SOR_HDA_ELD:
+		ret = priv->sor.hda_eld(priv, or, args, size);
+		break;
+	case NV84_DISP_SOR_HDMI_PWR:
+		ret = priv->sor.hdmi(priv, head, or, data);
+		break;
+	case NV50_DISP_SOR_LVDS_SCRIPT:
+		priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID;
+		ret = 0;
+		break;
+	case NV94_DISP_SOR_DP_TRAIN:
+		switch (data & NV94_DISP_SOR_DP_TRAIN_OP) {
+		case NV94_DISP_SOR_DP_TRAIN_OP_PATTERN:
+			ret = priv->sor.dp_train(priv, or, link, type, mask, data, &outp);
+			break;
+		case NV94_DISP_SOR_DP_TRAIN_OP_INIT:
+			ret = priv->sor.dp_train_init(priv, or, link, head, type, mask, data, &outp);
+			break;
+		case NV94_DISP_SOR_DP_TRAIN_OP_FINI:
+			ret = priv->sor.dp_train_fini(priv, or, link, head, type, mask, data, &outp);
+			break;
+		default:
+			break;
+		}
+		break;
+	case NV94_DISP_SOR_DP_LNKCTL:
+		ret = priv->sor.dp_lnkctl(priv, or, link, head, type, mask, data, &outp);
+		break;
+	case NV94_DISP_SOR_DP_DRVCTL(0):
+	case NV94_DISP_SOR_DP_DRVCTL(1):
+	case NV94_DISP_SOR_DP_DRVCTL(2):
+	case NV94_DISP_SOR_DP_DRVCTL(3):
+		ret = priv->sor.dp_drvctl(priv, or, link, (mthd & 0xc0) >> 6,
+				          type, mask, data, &outp);
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
new file mode 100644
index 000000000000..f6edd009762e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+static inline u32
+nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+{
+	static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
+	static const u8 nv94[] = { 16, 8, 0, 24 };
+	if (nv_device(priv)->chipset == 0xaf)
+		return nvaf[lane];
+	return nv94[lane];
+}
+
+int
+nv94_sor_dp_train_init(struct nv50_disp_priv *priv, int or, int link, int head,
+		       u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_dpout info;
+	u8  ver, hdr, cnt, len;
+	u16 outp;
+
+	outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+	if (outp) {
+		struct nvbios_init init = {
+			.subdev = nv_subdev(priv),
+			.bios = bios,
+			.outp = dcbo,
+			.crtc = head,
+			.execute = 1,
+		};
+
+		if (data & NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON)
+			init.offset = info.script[2];
+		else
+			init.offset = info.script[3];
+		nvbios_exec(&init);
+
+		init.offset = info.script[0];
+		nvbios_exec(&init);
+	}
+
+	return 0;
+}
+
+int
+nv94_sor_dp_train_fini(struct nv50_disp_priv *priv, int or, int link, int head,
+		       u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_dpout info;
+	u8  ver, hdr, cnt, len;
+	u16 outp;
+
+	outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+	if (outp) {
+		struct nvbios_init init = {
+			.subdev = nv_subdev(priv),
+			.bios = bios,
+			.offset = info.script[1],
+			.outp = dcbo,
+			.crtc = head,
+			.execute = 1,
+		};
+
+		nvbios_exec(&init);
+	}
+
+	return 0;
+}
+
+int
+nv94_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
+		  u16 type, u16 mask, u32 data, struct dcb_output *info)
+{
+	const u32 loff = (or * 0x800) + (link * 0x80);
+	const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
+	nv_mask(priv, 0x61c10c + loff, 0x0f000000, patt << 24);
+	return 0;
+}
+
+int
+nv94_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
+		   u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	const u32 loff = (or * 0x800) + (link * 0x80);
+	const u32 soff = (or * 0x800);
+	u16 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
+	u8  link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+	u32 dpctrl = 0x00000000;
+	u32 clksor = 0x00000000;
+	u32 outp, lane = 0;
+	u8  ver, hdr, cnt, len;
+	struct nvbios_dpout info;
+	int i;
+
+	/* -> 10kHz units */
+	link_bw *= 2700;
+
+	outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+	if (outp && info.lnkcmp) {
+		struct nvbios_init init = {
+			.subdev = nv_subdev(priv),
+			.bios = bios,
+			.offset = 0x0000,
+			.outp = dcbo,
+			.crtc = head,
+			.execute = 1,
+		};
+
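+		/* the lnkcmp table appears to hold (bandwidth, script)
+		 * pairs: skip entries rated above the requested bandwidth
+		 * and execute the script of the first remaining entry
+		 */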
+		while (link_bw < nv_ro16(bios, info.lnkcmp))
+			info.lnkcmp += 4;
+		init.offset = nv_ro16(bios, info.lnkcmp + 2);
+
+		nvbios_exec(&init);
+	}
+
+	dpctrl |= ((1 << link_nr) - 1) << 16;
+	if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+		dpctrl |= 0x00004000;
+	if (link_bw > 16200)
+		clksor |= 0x00040000;
+
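+	/* the lane map gives per-lane bit offsets (0/8/16/24); >> 3 turns
+	 * them into lane-enable bit indices
+	 */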
+	for (i = 0; i < link_nr; i++)
+		lane |= 1 << (nv94_sor_dp_lane_map(priv, i) >> 3);
+
+	nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor);
+	nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+	nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
+	return 0;
+}
+
+int
+nv94_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
+		   u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	const u32 loff = (or * 0x800) + (link * 0x80);
+	const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
+	const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+	u32 addr, shift = nv94_sor_dp_lane_map(priv, lane);
+	u8  ver, hdr, cnt, len;
+	struct nvbios_dpout outp;
+	struct nvbios_dpcfg ocfg;
+
+	addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+	if (!addr)
+		return -ENODEV;
+
+	addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+	if (!addr)
+		return -EINVAL;
+
+	nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
+	nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
+	nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
new file mode 100644
index 000000000000..c37ce7e29f5d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+static inline u32
+nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+{
+	static const u8 nvd0[] = { 16, 8, 0, 24 };
+	return nvd0[lane];
+}
+
+int
+nvd0_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
+		  u16 type, u16 mask, u32 data, struct dcb_output *info)
+{
+	const u32 loff = (or * 0x800) + (link * 0x80);
+	const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
+	nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * patt);
+	return 0;
+}
+
+int
+nvd0_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
+		   u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	const u32 loff = (or * 0x800) + (link * 0x80);
+	const u32 soff = (or * 0x800);
+	const u8  link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
+	const u8  link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+	u32 dpctrl = 0x00000000;
+	u32 clksor = 0x00000000;
+	u32 outp, lane = 0;
+	u8  ver, hdr, cnt, len;
+	struct nvbios_dpout info;
+	int i;
+
+	outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+	if (outp && info.lnkcmp) {
+		struct nvbios_init init = {
+			.subdev = nv_subdev(priv),
+			.bios = bios,
+			.offset = 0x0000,
+			.outp = dcbo,
+			.crtc = head,
+			.execute = 1,
+		};
+
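+		/* unlike nv94, lnkcmp entries here are 3 bytes (u8 bandwidth,
+		 * u16 script pointer) and appear sorted by ascending
+		 * bandwidth
+		 */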
+		while (nv_ro08(bios, info.lnkcmp) < link_bw)
+			info.lnkcmp += 3;
+		init.offset = nv_ro16(bios, info.lnkcmp + 1);
+
+		nvbios_exec(&init);
+	}
+
+	clksor |= link_bw << 18;
+	dpctrl |= ((1 << link_nr) - 1) << 16;
+	if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+		dpctrl |= 0x00004000;
+
+	for (i = 0; i < link_nr; i++)
+		lane |= 1 << (nvd0_sor_dp_lane_map(priv, i) >> 3);
+
+	nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor);
+	nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+	nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
+	return 0;
+}
+
+int
+nvd0_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
+		   u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	const u32 loff = (or * 0x800) + (link * 0x80);
+	const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
+	const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+	u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane);
+	u8  ver, hdr, cnt, len;
+	struct nvbios_dpout outp;
+	struct nvbios_dpcfg ocfg;
+
+	addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+	if (!addr)
+		return -ENODEV;
+
+	addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+	if (!addr)
+		return -EINVAL;
+
+	nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
+	nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
+	nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
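+	/* mask and data of zero: a read/write-back of the register,
+	 * presumably to latch the new drive settings
+	 */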
+	nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
index e1f013d39768..5103e88d1877 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
@@ -28,37 +28,39 @@
 #include <subdev/fb.h>
 #include <engine/dmaobj.h>
 
-int
-nouveau_dmaobj_create_(struct nouveau_object *parent,
-		       struct nouveau_object *engine,
-		       struct nouveau_oclass *oclass,
-		       void *data, u32 size, int len, void **pobject)
+static int
+nouveau_dmaobj_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
 {
+	struct nouveau_dmaeng *dmaeng = (void *)engine;
+	struct nouveau_dmaobj *dmaobj;
+	struct nouveau_gpuobj *gpuobj;
 	struct nv_dma_class *args = data;
-	struct nouveau_dmaobj *object;
 	int ret;
 
 	if (size < sizeof(*args))
 		return -EINVAL;
 
-	ret = nouveau_object_create_(parent, engine, oclass, 0, len, pobject);
-	object = *pobject;
+	ret = nouveau_object_create(parent, engine, oclass, 0, &dmaobj);
+	*pobject = nv_object(dmaobj);
 	if (ret)
 		return ret;
 
 	switch (args->flags & NV_DMA_TARGET_MASK) {
 	case NV_DMA_TARGET_VM:
-		object->target = NV_MEM_TARGET_VM;
+		dmaobj->target = NV_MEM_TARGET_VM;
 		break;
 	case NV_DMA_TARGET_VRAM:
-		object->target = NV_MEM_TARGET_VRAM;
+		dmaobj->target = NV_MEM_TARGET_VRAM;
 		break;
 	case NV_DMA_TARGET_PCI:
-		object->target = NV_MEM_TARGET_PCI;
+		dmaobj->target = NV_MEM_TARGET_PCI;
 		break;
 	case NV_DMA_TARGET_PCI_US:
 	case NV_DMA_TARGET_AGP:
-		object->target = NV_MEM_TARGET_PCI_NOSNOOP;
+		dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
 		break;
 	default:
 		return -EINVAL;
@@ -66,22 +68,53 @@ nouveau_dmaobj_create_(struct nouveau_object *parent,
 
 	switch (args->flags & NV_DMA_ACCESS_MASK) {
 	case NV_DMA_ACCESS_VM:
-		object->access = NV_MEM_ACCESS_VM;
+		dmaobj->access = NV_MEM_ACCESS_VM;
 		break;
 	case NV_DMA_ACCESS_RD:
-		object->access = NV_MEM_ACCESS_RO;
+		dmaobj->access = NV_MEM_ACCESS_RO;
 		break;
 	case NV_DMA_ACCESS_WR:
-		object->access = NV_MEM_ACCESS_WO;
+		dmaobj->access = NV_MEM_ACCESS_WO;
 		break;
 	case NV_DMA_ACCESS_RDWR:
-		object->access = NV_MEM_ACCESS_RW;
+		dmaobj->access = NV_MEM_ACCESS_RW;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	object->start = args->start;
-	object->limit = args->limit;
-	return 0;
+	dmaobj->start = args->start;
+	dmaobj->limit = args->limit;
+	dmaobj->conf0 = args->conf0;
+
+	switch (nv_mclass(parent)) {
+	case NV_DEVICE_CLASS:
+		/* delayed, or no, binding */
+		break;
+	default:
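+		/* bind immediately, handing the resulting gpuobj back to the
+		 * caller in place of the dmaobj
+		 */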
+		ret = dmaeng->bind(dmaeng, *pobject, dmaobj, &gpuobj);
+		if (ret == 0) {
+			nouveau_object_ref(NULL, pobject);
+			*pobject = nv_object(gpuobj);
+		}
+		break;
+	}
+
+	return ret;
 }
+
+static struct nouveau_ofuncs
+nouveau_dmaobj_ofuncs = {
+	.ctor = nouveau_dmaobj_ctor,
+	.dtor = nouveau_object_destroy,
+	.init = nouveau_object_init,
+	.fini = nouveau_object_fini,
+};
+
+struct nouveau_oclass
+nouveau_dmaobj_sclass[] = {
+	{ NV_DMA_FROM_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+	{ NV_DMA_TO_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+	{ NV_DMA_IN_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+	{}
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
index 9f4cc2f31994..027d8217c0fa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
@@ -34,10 +34,6 @@ struct nv04_dmaeng_priv {
 	struct nouveau_dmaeng base;
 };
 
-struct nv04_dmaobj_priv {
-	struct nouveau_dmaobj base;
-};
-
 static int
 nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
 		 struct nouveau_object *parent,
@@ -53,6 +49,18 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
 	u32 length = dmaobj->limit - dmaobj->start;
 	int ret;
 
+	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+		switch (nv_mclass(parent->parent)) {
+		case NV03_CHANNEL_DMA_CLASS:
+		case NV10_CHANNEL_DMA_CLASS:
+		case NV17_CHANNEL_DMA_CLASS:
+		case NV40_CHANNEL_DMA_CLASS:
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
 	if (dmaobj->target == NV_MEM_TARGET_VM) {
 		if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) {
 			struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
@@ -105,56 +113,6 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
 	return ret;
 }
 
-static int
-nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-		 struct nouveau_oclass *oclass, void *data, u32 size,
-		 struct nouveau_object **pobject)
-{
-	struct nouveau_dmaeng *dmaeng = (void *)engine;
-	struct nv04_dmaobj_priv *dmaobj;
-	struct nouveau_gpuobj *gpuobj;
-	int ret;
-
-	ret = nouveau_dmaobj_create(parent, engine, oclass,
-				    data, size, &dmaobj);
-	*pobject = nv_object(dmaobj);
-	if (ret)
-		return ret;
-
-	switch (nv_mclass(parent)) {
-	case NV_DEVICE_CLASS:
-		break;
-	case NV03_CHANNEL_DMA_CLASS:
-	case NV10_CHANNEL_DMA_CLASS:
-	case NV17_CHANNEL_DMA_CLASS:
-	case NV40_CHANNEL_DMA_CLASS:
-		ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
-		nouveau_object_ref(NULL, pobject);
-		*pobject = nv_object(gpuobj);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return ret;
-}
-
-static struct nouveau_ofuncs
-nv04_dmaobj_ofuncs = {
-	.ctor = nv04_dmaobj_ctor,
-	.dtor = _nouveau_dmaobj_dtor,
-	.init = _nouveau_dmaobj_init,
-	.fini = _nouveau_dmaobj_fini,
-};
-
-static struct nouveau_oclass
-nv04_dmaobj_sclass[] = {
-	{ 0x0002, &nv04_dmaobj_ofuncs },
-	{ 0x0003, &nv04_dmaobj_ofuncs },
-	{ 0x003d, &nv04_dmaobj_ofuncs },
-	{}
-};
-
 static int
 nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -168,7 +126,7 @@ nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	priv->base.base.sclass = nv04_dmaobj_sclass;
+	nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
 	priv->base.bind = nv04_dmaobj_bind;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
index 045d2565e289..750183f7c057 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
@@ -32,36 +32,74 @@ struct nv50_dmaeng_priv {
 	struct nouveau_dmaeng base;
 };
 
-struct nv50_dmaobj_priv {
-	struct nouveau_dmaobj base;
-};
-
 static int
 nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
 		 struct nouveau_object *parent,
 		 struct nouveau_dmaobj *dmaobj,
 		 struct nouveau_gpuobj **pgpuobj)
 {
-	u32 flags = nv_mclass(dmaobj);
+	u32 flags0 = nv_mclass(dmaobj);
+	u32 flags5 = 0x00000000;
 	int ret;
 
+	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+		switch (nv_mclass(parent->parent)) {
+		case NV50_CHANNEL_DMA_CLASS:
+		case NV84_CHANNEL_DMA_CLASS:
+		case NV50_CHANNEL_IND_CLASS:
+		case NV84_CHANNEL_IND_CLASS:
+		case NV50_DISP_MAST_CLASS:
+		case NV84_DISP_MAST_CLASS:
+		case NV94_DISP_MAST_CLASS:
+		case NVA0_DISP_MAST_CLASS:
+		case NVA3_DISP_MAST_CLASS:
+		case NV50_DISP_SYNC_CLASS:
+		case NV84_DISP_SYNC_CLASS:
+		case NV94_DISP_SYNC_CLASS:
+		case NVA0_DISP_SYNC_CLASS:
+		case NVA3_DISP_SYNC_CLASS:
+		case NV50_DISP_OVLY_CLASS:
+		case NV84_DISP_OVLY_CLASS:
+		case NV94_DISP_OVLY_CLASS:
+		case NVA0_DISP_OVLY_CLASS:
+		case NVA3_DISP_OVLY_CLASS:
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
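+	/* if the class hasn't requested specific settings (ENABLE clear),
+	 * pick defaults based on the memory target
+	 */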
+	if (!(dmaobj->conf0 & NV50_DMA_CONF0_ENABLE)) {
+		if (dmaobj->target == NV_MEM_TARGET_VM) {
+			dmaobj->conf0  = NV50_DMA_CONF0_PRIV_VM;
+			dmaobj->conf0 |= NV50_DMA_CONF0_PART_VM;
+			dmaobj->conf0 |= NV50_DMA_CONF0_COMP_VM;
+			dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_VM;
+		} else {
+			dmaobj->conf0  = NV50_DMA_CONF0_PRIV_US;
+			dmaobj->conf0 |= NV50_DMA_CONF0_PART_256;
+			dmaobj->conf0 |= NV50_DMA_CONF0_COMP_NONE;
+			dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR;
+		}
+	}
+
+	flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_COMP) << 22;
+	flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_TYPE) << 22;
+	flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_PRIV);
+	flags5 |= (dmaobj->conf0 & NV50_DMA_CONF0_PART);
+
 	switch (dmaobj->target) {
 	case NV_MEM_TARGET_VM:
-		flags |= 0x00000000;
-		flags |= 0x60000000; /* COMPRESSION_USEVM */
-		flags |= 0x1fc00000; /* STORAGE_TYPE_USEVM */
+		flags0 |= 0x00000000;
 		break;
 	case NV_MEM_TARGET_VRAM:
-		flags |= 0x00010000;
-		flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+		flags0 |= 0x00010000;
 		break;
 	case NV_MEM_TARGET_PCI:
-		flags |= 0x00020000;
-		flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+		flags0 |= 0x00020000;
 		break;
 	case NV_MEM_TARGET_PCI_NOSNOOP:
-		flags |= 0x00030000;
-		flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+		flags0 |= 0x00030000;
 		break;
 	default:
 		return -EINVAL;
@@ -71,78 +109,28 @@ nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
 	case NV_MEM_ACCESS_VM:
 		break;
 	case NV_MEM_ACCESS_RO:
-		flags |= 0x00040000;
+		flags0 |= 0x00040000;
 		break;
 	case NV_MEM_ACCESS_WO:
 	case NV_MEM_ACCESS_RW:
-		flags |= 0x00080000;
+		flags0 |= 0x00080000;
 		break;
 	}
 
 	ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
 	if (ret == 0) {
-		nv_wo32(*pgpuobj, 0x00, flags);
+		nv_wo32(*pgpuobj, 0x00, flags0);
 		nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
 		nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
 		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
 					upper_32_bits(dmaobj->start));
 		nv_wo32(*pgpuobj, 0x10, 0x00000000);
-		nv_wo32(*pgpuobj, 0x14, 0x00000000);
+		nv_wo32(*pgpuobj, 0x14, flags5);
 	}
 
 	return ret;
 }
 
-static int
-nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-		 struct nouveau_oclass *oclass, void *data, u32 size,
-		 struct nouveau_object **pobject)
-{
-	struct nouveau_dmaeng *dmaeng = (void *)engine;
-	struct nv50_dmaobj_priv *dmaobj;
-	struct nouveau_gpuobj *gpuobj;
-	int ret;
-
-	ret = nouveau_dmaobj_create(parent, engine, oclass,
-				    data, size, &dmaobj);
-	*pobject = nv_object(dmaobj);
-	if (ret)
-		return ret;
-
-	switch (nv_mclass(parent)) {
-	case NV_DEVICE_CLASS:
-		break;
-	case NV50_CHANNEL_DMA_CLASS:
-	case NV84_CHANNEL_DMA_CLASS:
-	case NV50_CHANNEL_IND_CLASS:
-	case NV84_CHANNEL_IND_CLASS:
-		ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
-		nouveau_object_ref(NULL, pobject);
-		*pobject = nv_object(gpuobj);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return ret;
-}
-
-static struct nouveau_ofuncs
-nv50_dmaobj_ofuncs = {
-	.ctor = nv50_dmaobj_ctor,
-	.dtor = _nouveau_dmaobj_dtor,
-	.init = _nouveau_dmaobj_init,
-	.fini = _nouveau_dmaobj_fini,
-};
-
-static struct nouveau_oclass
-nv50_dmaobj_sclass[] = {
-	{ 0x0002, &nv50_dmaobj_ofuncs },
-	{ 0x0003, &nv50_dmaobj_ofuncs },
-	{ 0x003d, &nv50_dmaobj_ofuncs },
-	{}
-};
-
 static int
 nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -156,7 +144,7 @@ nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	priv->base.base.sclass = nv50_dmaobj_sclass;
+	nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
 	priv->base.bind = nv50_dmaobj_bind;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
index 5baa08695535..cd3970d03b80 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
@@ -22,7 +22,9 @@
  * Authors: Ben Skeggs
  */
 
+#include <core/device.h>
 #include <core/gpuobj.h>
+#include <core/class.h>
 
 #include <subdev/fb.h>
 #include <engine/dmaobj.h>
@@ -31,45 +33,86 @@ struct nvc0_dmaeng_priv {
 	struct nouveau_dmaeng base;
 };
 
-struct nvc0_dmaobj_priv {
-	struct nouveau_dmaobj base;
-};
-
 static int
-nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-		 struct nouveau_oclass *oclass, void *data, u32 size,
-		 struct nouveau_object **pobject)
+nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+		 struct nouveau_object *parent,
+		 struct nouveau_dmaobj *dmaobj,
+		 struct nouveau_gpuobj **pgpuobj)
 {
-	struct nvc0_dmaobj_priv *dmaobj;
+	u32 flags0 = nv_mclass(dmaobj);
+	u32 flags5 = 0x00000000;
 	int ret;
 
-	ret = nouveau_dmaobj_create(parent, engine, oclass, data, size, &dmaobj);
-	*pobject = nv_object(dmaobj);
-	if (ret)
-		return ret;
+	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+		switch (nv_mclass(parent->parent)) {
+		case NVA3_DISP_MAST_CLASS:
+		case NVA3_DISP_SYNC_CLASS:
+		case NVA3_DISP_OVLY_CLASS:
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else {
+		return 0;
+	}
 
-	if (dmaobj->base.target != NV_MEM_TARGET_VM || dmaobj->base.start)
+	if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) {
+		if (dmaobj->target == NV_MEM_TARGET_VM) {
+			dmaobj->conf0  = NVC0_DMA_CONF0_PRIV_VM;
+			dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM;
+		} else {
+			dmaobj->conf0  = NVC0_DMA_CONF0_PRIV_US;
+			dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR;
+			dmaobj->conf0 |= 0x00020000;
+		}
+	}
+
+	flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22;
+	flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV);
+	flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN);
+
+	switch (dmaobj->target) {
+	case NV_MEM_TARGET_VM:
+		flags0 |= 0x00000000;
+		break;
+	case NV_MEM_TARGET_VRAM:
+		flags0 |= 0x00010000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		flags0 |= 0x00030000;
+		break;
+	default:
 		return -EINVAL;
+	}
 
-	return 0;
+	switch (dmaobj->access) {
+	case NV_MEM_ACCESS_VM:
+		break;
+	case NV_MEM_ACCESS_RO:
+		flags0 |= 0x00040000;
+		break;
+	case NV_MEM_ACCESS_WO:
+	case NV_MEM_ACCESS_RW:
+		flags0 |= 0x00080000;
+		break;
+	}
+
+	ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+	if (ret == 0) {
+		nv_wo32(*pgpuobj, 0x00, flags0);
+		nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
+		nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
+		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
+					upper_32_bits(dmaobj->start));
+		nv_wo32(*pgpuobj, 0x10, 0x00000000);
+		nv_wo32(*pgpuobj, 0x14, flags5);
+	}
+
+	return ret;
 }
 
-static struct nouveau_ofuncs
-nvc0_dmaobj_ofuncs = {
-	.ctor = nvc0_dmaobj_ctor,
-	.dtor = _nouveau_dmaobj_dtor,
-	.init = _nouveau_dmaobj_init,
-	.fini = _nouveau_dmaobj_fini,
-};
-
-static struct nouveau_oclass
-nvc0_dmaobj_sclass[] = {
-	{ 0x0002, &nvc0_dmaobj_ofuncs },
-	{ 0x0003, &nvc0_dmaobj_ofuncs },
-	{ 0x003d, &nvc0_dmaobj_ofuncs },
-	{}
-};
-
 static int
 nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -83,7 +126,8 @@ nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	priv->base.base.sclass = nvc0_dmaobj_sclass;
+	nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
+	priv->base.bind = nvc0_dmaobj_bind;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
new file mode 100644
index 000000000000..d1528752980c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/device.h>
+#include <core/gpuobj.h>
+#include <core/class.h>
+
+#include <subdev/fb.h>
+#include <engine/dmaobj.h>
+
+struct nvd0_dmaeng_priv {
+	struct nouveau_dmaeng base;
+};
+
+static int
+nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+		 struct nouveau_object *parent,
+		 struct nouveau_dmaobj *dmaobj,
+		 struct nouveau_gpuobj **pgpuobj)
+{
+	u32 flags0 = 0x00000000;
+	int ret;
+
+	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+		switch (nv_mclass(parent->parent)) {
+		case NVD0_DISP_MAST_CLASS:
+		case NVD0_DISP_SYNC_CLASS:
+		case NVD0_DISP_OVLY_CLASS:
+		case NVE0_DISP_MAST_CLASS:
+		case NVE0_DISP_SYNC_CLASS:
+		case NVE0_DISP_OVLY_CLASS:
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else {
+		return 0;
+	}
+
+	if (!(dmaobj->conf0 & NVD0_DMA_CONF0_ENABLE)) {
+		if (dmaobj->target == NV_MEM_TARGET_VM) {
+			dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_VM;
+			dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_LP;
+		} else {
+			dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR;
+			dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_SP;
+		}
+	}
+
+	flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_TYPE) << 20;
+	flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_PAGE) >> 4;
+
+	switch (dmaobj->target) {
+	case NV_MEM_TARGET_VRAM:
+		flags0 |= 0x00000009;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+	if (ret == 0) {
+		nv_wo32(*pgpuobj, 0x00, flags0);
+		nv_wo32(*pgpuobj, 0x04, dmaobj->start >> 8);
+		nv_wo32(*pgpuobj, 0x08, dmaobj->limit >> 8);
+		nv_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nv_wo32(*pgpuobj, 0x10, 0x00000000);
+		nv_wo32(*pgpuobj, 0x14, 0x00000000);
+	}
+
+	return ret;
+}
+
+static int
+nvd0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		 struct nouveau_oclass *oclass, void *data, u32 size,
+		 struct nouveau_object **pobject)
+{
+	struct nvd0_dmaeng_priv *priv;
+	int ret;
+
+	ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
+	priv->base.bind = nvd0_dmaobj_bind;
+	return 0;
+}
+
+struct nouveau_oclass
+nvd0_dmaeng_oclass = {
+	.handle = NV_ENGINE(DMAOBJ, 0xd0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvd0_dmaeng_ctor,
+		.dtor = _nouveau_dmaeng_dtor,
+		.init = _nouveau_dmaeng_init,
+		.fini = _nouveau_dmaeng_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index bbb43c67c2ae..c2b9db335816 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -24,6 +24,7 @@
 
 #include <core/object.h>
 #include <core/handle.h>
+#include <core/class.h>
 
 #include <engine/dmaobj.h>
 #include <engine/fifo.h>
@@ -33,7 +34,7 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
 			     struct nouveau_object *engine,
 			     struct nouveau_oclass *oclass,
 			     int bar, u32 addr, u32 size, u32 pushbuf,
-			     u32 engmask, int len, void **ptr)
+			     u64 engmask, int len, void **ptr)
 {
 	struct nouveau_device *device = nv_device(engine);
 	struct nouveau_fifo *priv = (void *)engine;
@@ -56,18 +57,16 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
 
 	dmaeng = (void *)chan->pushdma->base.engine;
 	switch (chan->pushdma->base.oclass->handle) {
-	case 0x0002:
-	case 0x003d:
+	case NV_DMA_FROM_MEMORY_CLASS:
+	case NV_DMA_IN_MEMORY_CLASS:
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	if (dmaeng->bind) {
-		ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
-		if (ret)
-			return ret;
-	}
+	ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
+	if (ret)
+		return ret;
 
 	/* find a free fifo channel */
 	spin_lock_irqsave(&priv->lock, flags);
@@ -119,14 +118,14 @@ _nouveau_fifo_channel_dtor(struct nouveau_object *object)
 }
 
 u32
-_nouveau_fifo_channel_rd32(struct nouveau_object *object, u32 addr)
+_nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr)
 {
 	struct nouveau_fifo_chan *chan = (void *)object;
 	return ioread32_native(chan->user + addr);
 }
 
 void
-_nouveau_fifo_channel_wr32(struct nouveau_object *object, u32 addr, u32 data)
+_nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
 	struct nouveau_fifo_chan *chan = (void *)object;
 	iowrite32_native(data, chan->user + addr);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index ea76e3e8c9c2..a47a8548f9e0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -126,9 +126,9 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent,
 
 	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
 					  0x10000, args->pushbuf,
-					  (1 << NVDEV_ENGINE_DMAOBJ) |
-					  (1 << NVDEV_ENGINE_SW) |
-					  (1 << NVDEV_ENGINE_GR), &chan);
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR), &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
@@ -440,7 +440,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
 			}
 
 			if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
-				nv_info(priv, "CACHE_ERROR - Ch %d/%d "
+				nv_error(priv, "CACHE_ERROR - Ch %d/%d "
 					      "Mthd 0x%04x Data 0x%08x\n",
 					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
 					data);
@@ -476,7 +476,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
 				u32 ib_get = nv_rd32(priv, 0x003334);
 				u32 ib_put = nv_rd32(priv, 0x003330);
 
-				nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
+				nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
 				     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
 				     "State 0x%08x (err: %s) Push 0x%08x\n",
 					chid, ho_get, dma_get, ho_put,
@@ -494,7 +494,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
 					nv_wr32(priv, 0x003334, ib_put);
 				}
 			} else {
-				nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
+				nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
 					     "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
 					chid, dma_get, dma_put, state,
 					nv_dma_state_err(state), push);
@@ -525,14 +525,13 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
 
 		if (device->card_type == NV_50) {
 			if (status & 0x00000010) {
-				nv50_fb_trap(nouveau_fb(priv), 1);
 				status &= ~0x00000010;
 				nv_wr32(priv, 0x002100, 0x00000010);
 			}
 		}
 
 		if (status) {
-			nv_info(priv, "unknown intr 0x%08x, ch %d\n",
+			nv_warn(priv, "unknown intr 0x%08x, ch %d\n",
 				status, chid);
 			nv_wr32(priv, NV03_PFIFO_INTR_0, status);
 			status = 0;
@@ -542,7 +541,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
 	}
 
 	if (status) {
-		nv_info(priv, "still angry after %d spins, halt\n", cnt);
+		nv_error(priv, "still angry after %d spins, halt\n", cnt);
 		nv_wr32(priv, 0x002140, 0);
 		nv_wr32(priv, 0x000140, 0);
 	}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
index 4ba75422b89d..2c927c1d173b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -69,9 +69,9 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent,
 
 	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
 					  0x10000, args->pushbuf,
-					  (1 << NVDEV_ENGINE_DMAOBJ) |
-					  (1 << NVDEV_ENGINE_SW) |
-					  (1 << NVDEV_ENGINE_GR), &chan);
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR), &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
index b96e6b0ae2b1..a9cb51d38c57 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -74,10 +74,10 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent,
 
 	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
 					  0x10000, args->pushbuf,
-					  (1 << NVDEV_ENGINE_DMAOBJ) |
-					  (1 << NVDEV_ENGINE_SW) |
-					  (1 << NVDEV_ENGINE_GR) |
-					  (1 << NVDEV_ENGINE_MPEG), /* NV31- */
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR) |
+					  (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */
 					  &chan);
 	*pobject = nv_object(chan);
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
index 559c3b4e1b86..2b1f91721225 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -192,10 +192,10 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent,
 
 	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
 					  0x1000, args->pushbuf,
-					  (1 << NVDEV_ENGINE_DMAOBJ) |
-					  (1 << NVDEV_ENGINE_SW) |
-					  (1 << NVDEV_ENGINE_GR) |
-					  (1 << NVDEV_ENGINE_MPEG), &chan);
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR) |
+					  (1ULL << NVDEV_ENGINE_MPEG), &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 536e7634a00d..bd096364f680 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -112,14 +112,6 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
 		return -EINVAL;
 	}
 
-	nv_wo32(base->eng, addr + 0x00, 0x00000000);
-	nv_wo32(base->eng, addr + 0x04, 0x00000000);
-	nv_wo32(base->eng, addr + 0x08, 0x00000000);
-	nv_wo32(base->eng, addr + 0x0c, 0x00000000);
-	nv_wo32(base->eng, addr + 0x10, 0x00000000);
-	nv_wo32(base->eng, addr + 0x14, 0x00000000);
-	bar->flush(bar);
-
 	/* HW bug workaround:
 	 *
 	 * PFIFO will hang forever if the connected engines don't report
@@ -141,8 +133,18 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
 		if (suspend)
 			ret = -EBUSY;
 	}
-
 	nv_wr32(priv, 0x00b860, me);
+
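+	/* zero the engine context entries only once the kick has completed */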
+	if (ret == 0) {
+		nv_wo32(base->eng, addr + 0x00, 0x00000000);
+		nv_wo32(base->eng, addr + 0x04, 0x00000000);
+		nv_wo32(base->eng, addr + 0x08, 0x00000000);
+		nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+		nv_wo32(base->eng, addr + 0x10, 0x00000000);
+		nv_wo32(base->eng, addr + 0x14, 0x00000000);
+		bar->flush(bar);
+	}
+
 	return ret;
 }
 
@@ -194,10 +196,10 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
 
 	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
 					  0x2000, args->pushbuf,
-					  (1 << NVDEV_ENGINE_DMAOBJ) |
-					  (1 << NVDEV_ENGINE_SW) |
-					  (1 << NVDEV_ENGINE_GR) |
-					  (1 << NVDEV_ENGINE_MPEG), &chan);
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR) |
+					  (1ULL << NVDEV_ENGINE_MPEG), &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
@@ -247,10 +249,10 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
 
 	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
 					  0x2000, args->pushbuf,
-					  (1 << NVDEV_ENGINE_DMAOBJ) |
-					  (1 << NVDEV_ENGINE_SW) |
-					  (1 << NVDEV_ENGINE_GR) |
-					  (1 << NVDEV_ENGINE_MPEG), &chan);
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR) |
+					  (1ULL << NVDEV_ENGINE_MPEG), &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index b4fd26d8f166..1eb1c512f503 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -95,14 +95,6 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
 		return -EINVAL;
 	}
 
-	nv_wo32(base->eng, addr + 0x00, 0x00000000);
-	nv_wo32(base->eng, addr + 0x04, 0x00000000);
-	nv_wo32(base->eng, addr + 0x08, 0x00000000);
-	nv_wo32(base->eng, addr + 0x0c, 0x00000000);
-	nv_wo32(base->eng, addr + 0x10, 0x00000000);
-	nv_wo32(base->eng, addr + 0x14, 0x00000000);
-	bar->flush(bar);
-
 	save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
 	nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
 	done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
@@ -112,6 +104,14 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
 		if (suspend)
 			return -EBUSY;
 	}
+
+	nv_wo32(base->eng, addr + 0x00, 0x00000000);
+	nv_wo32(base->eng, addr + 0x04, 0x00000000);
+	nv_wo32(base->eng, addr + 0x08, 0x00000000);
+	nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+	nv_wo32(base->eng, addr + 0x10, 0x00000000);
+	nv_wo32(base->eng, addr + 0x14, 0x00000000);
+	bar->flush(bar);
 	return 0;
 }
 
@@ -163,17 +163,17 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
 
 	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
 					  0x2000, args->pushbuf,
-					  (1 << NVDEV_ENGINE_DMAOBJ) |
-					  (1 << NVDEV_ENGINE_SW) |
-					  (1 << NVDEV_ENGINE_GR) |
-					  (1 << NVDEV_ENGINE_MPEG) |
-					  (1 << NVDEV_ENGINE_ME) |
-					  (1 << NVDEV_ENGINE_VP) |
-					  (1 << NVDEV_ENGINE_CRYPT) |
-					  (1 << NVDEV_ENGINE_BSP) |
-					  (1 << NVDEV_ENGINE_PPP) |
-					  (1 << NVDEV_ENGINE_COPY0) |
-					  (1 << NVDEV_ENGINE_UNK1C1), &chan);
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR) |
+					  (1ULL << NVDEV_ENGINE_MPEG) |
+					  (1ULL << NVDEV_ENGINE_ME) |
+					  (1ULL << NVDEV_ENGINE_VP) |
+					  (1ULL << NVDEV_ENGINE_CRYPT) |
+					  (1ULL << NVDEV_ENGINE_BSP) |
+					  (1ULL << NVDEV_ENGINE_PPP) |
+					  (1ULL << NVDEV_ENGINE_COPY0) |
+					  (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
@@ -225,17 +225,17 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
 
 	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
 					  0x2000, args->pushbuf,
-					  (1 << NVDEV_ENGINE_DMAOBJ) |
-					  (1 << NVDEV_ENGINE_SW) |
-					  (1 << NVDEV_ENGINE_GR) |
-					  (1 << NVDEV_ENGINE_MPEG) |
-					  (1 << NVDEV_ENGINE_ME) |
-					  (1 << NVDEV_ENGINE_VP) |
-					  (1 << NVDEV_ENGINE_CRYPT) |
-					  (1 << NVDEV_ENGINE_BSP) |
-					  (1 << NVDEV_ENGINE_PPP) |
-					  (1 << NVDEV_ENGINE_COPY0) |
-					  (1 << NVDEV_ENGINE_UNK1C1), &chan);
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR) |
+					  (1ULL << NVDEV_ENGINE_MPEG) |
+					  (1ULL << NVDEV_ENGINE_ME) |
+					  (1ULL << NVDEV_ENGINE_VP) |
+					  (1ULL << NVDEV_ENGINE_CRYPT) |
+					  (1ULL << NVDEV_ENGINE_BSP) |
+					  (1ULL << NVDEV_ENGINE_PPP) |
+					  (1ULL << NVDEV_ENGINE_COPY0) |
+					  (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 6f21be600557..b4365dde1859 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -103,6 +103,9 @@ nvc0_fifo_context_attach(struct nouveau_object *parent,
 	case NVDEV_ENGINE_GR   : addr = 0x0210; break;
 	case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
 	case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+	case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
+	case NVDEV_ENGINE_VP   : addr = 0x0250; break;
+	case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
 	default:
 		return -EINVAL;
 	}
@@ -137,14 +140,13 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
 	case NVDEV_ENGINE_GR   : addr = 0x0210; break;
 	case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
 	case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+	case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
+	case NVDEV_ENGINE_VP   : addr = 0x0250; break;
+	case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
 	default:
 		return -EINVAL;
 	}
 
-	nv_wo32(base, addr + 0x00, 0x00000000);
-	nv_wo32(base, addr + 0x04, 0x00000000);
-	bar->flush(bar);
-
 	nv_wr32(priv, 0x002634, chan->base.chid);
 	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
 		nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
@@ -152,6 +154,9 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
 			return -EBUSY;
 	}
 
+	nv_wo32(base, addr + 0x00, 0x00000000);
+	nv_wo32(base, addr + 0x04, 0x00000000);
+	bar->flush(bar);
 	return 0;
 }
 
@@ -175,10 +180,13 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
 	ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
 					  priv->user.bar.offset, 0x1000,
 					  args->pushbuf,
-					  (1 << NVDEV_ENGINE_SW) |
-					  (1 << NVDEV_ENGINE_GR) |
-					  (1 << NVDEV_ENGINE_COPY0) |
-					  (1 << NVDEV_ENGINE_COPY1), &chan);
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR) |
+					  (1ULL << NVDEV_ENGINE_COPY0) |
+					  (1ULL << NVDEV_ENGINE_COPY1) |
+					  (1ULL << NVDEV_ENGINE_BSP) |
+					  (1ULL << NVDEV_ENGINE_VP) |
+					  (1ULL << NVDEV_ENGINE_PPP), &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
@@ -494,7 +502,7 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
 	u32 stat = nv_rd32(priv, 0x002100) & mask;
 
 	if (stat & 0x00000100) {
-		nv_info(priv, "unknown status 0x00000100\n");
+		nv_warn(priv, "unknown status 0x00000100\n");
 		nv_wr32(priv, 0x002100, 0x00000100);
 		stat &= ~0x00000100;
 	}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 36e81b6fafbc..c930da99c2c1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -38,12 +38,12 @@
 #include <engine/dmaobj.h>
 #include <engine/fifo.h>
 
-#define _(a,b) { (a), ((1 << (a)) | (b)) }
+#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
 static const struct {
-	int subdev;
-	u32 mask;
+	u64 subdev;
+	u64 mask;
 } fifo_engine[] = {
-	_(NVDEV_ENGINE_GR      , (1 << NVDEV_ENGINE_SW)),
+	_(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW)),
 	_(NVDEV_ENGINE_VP      , 0),
 	_(NVDEV_ENGINE_PPP     , 0),
 	_(NVDEV_ENGINE_BSP     , 0),
@@ -138,6 +138,9 @@ nve0_fifo_context_attach(struct nouveau_object *parent,
 	case NVDEV_ENGINE_GR   :
 	case NVDEV_ENGINE_COPY0:
 	case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+	case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
+	case NVDEV_ENGINE_VP   : addr = 0x0250; break;
+	case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
 	default:
 		return -EINVAL;
 	}
@@ -172,14 +175,13 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
 	case NVDEV_ENGINE_GR   :
 	case NVDEV_ENGINE_COPY0:
 	case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+	case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
+	case NVDEV_ENGINE_VP   : addr = 0x0250; break;
+	case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
 	default:
 		return -EINVAL;
 	}
 
-	nv_wo32(base, addr + 0x00, 0x00000000);
-	nv_wo32(base, addr + 0x04, 0x00000000);
-	bar->flush(bar);
-
 	nv_wr32(priv, 0x002634, chan->base.chid);
 	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
 		nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
@@ -187,6 +189,9 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
 			return -EBUSY;
 	}
 
+	nv_wo32(base, addr + 0x00, 0x00000000);
+	nv_wo32(base, addr + 0x04, 0x00000000);
+	bar->flush(bar);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
index 618528248457..e30a9c5ff1fc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -787,168 +787,168 @@ nv01_graph_mthd_bind_chroma(struct nouveau_object *object, u32 mthd,
 
 static struct nouveau_omthds
 nv03_graph_gdi_omthds[] = {
-	{ 0x0184, nv01_graph_mthd_bind_patt },
-	{ 0x0188, nv04_graph_mthd_bind_rop },
-	{ 0x018c, nv04_graph_mthd_bind_beta1 },
-	{ 0x0190, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{ 0x0184, 0x0184, nv01_graph_mthd_bind_patt },
+	{ 0x0188, 0x0188, nv04_graph_mthd_bind_rop },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_beta1 },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_surf_dst },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
 	{}
 };
 
 static struct nouveau_omthds
 nv04_graph_gdi_omthds[] = {
-	{ 0x0188, nv04_graph_mthd_bind_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{ 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+	{ 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
 	{}
 };
 
 static struct nouveau_omthds
 nv01_graph_blit_omthds[] = {
-	{ 0x0184, nv01_graph_mthd_bind_chroma },
-	{ 0x0188, nv01_graph_mthd_bind_clip },
-	{ 0x018c, nv01_graph_mthd_bind_patt },
-	{ 0x0190, nv04_graph_mthd_bind_rop },
-	{ 0x0194, nv04_graph_mthd_bind_beta1 },
-	{ 0x0198, nv04_graph_mthd_bind_surf_dst },
-	{ 0x019c, nv04_graph_mthd_bind_surf_src },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{ 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+	{ 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+	{ 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+	{ 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
+	{ 0x019c, 0x019c, nv04_graph_mthd_bind_surf_src },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
 	{}
 };
 
 static struct nouveau_omthds
 nv04_graph_blit_omthds[] = {
-	{ 0x0184, nv01_graph_mthd_bind_chroma },
-	{ 0x0188, nv01_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_patt },
-	{ 0x0190, nv04_graph_mthd_bind_rop },
-	{ 0x0194, nv04_graph_mthd_bind_beta1 },
-	{ 0x0198, nv04_graph_mthd_bind_beta4 },
-	{ 0x019c, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{ 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+	{ 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+	{ 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
+	{ 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
 	{}
 };
 
 static struct nouveau_omthds
 nv04_graph_iifc_omthds[] = {
-	{ 0x0188, nv01_graph_mthd_bind_chroma },
-	{ 0x018c, nv01_graph_mthd_bind_clip },
-	{ 0x0190, nv04_graph_mthd_bind_patt },
-	{ 0x0194, nv04_graph_mthd_bind_rop },
-	{ 0x0198, nv04_graph_mthd_bind_beta1 },
-	{ 0x019c, nv04_graph_mthd_bind_beta4 },
-	{ 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
-	{ 0x03e4, nv04_graph_mthd_set_operation },
+	{ 0x0188, 0x0188, nv01_graph_mthd_bind_chroma },
+	{ 0x018c, 0x018c, nv01_graph_mthd_bind_clip },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_patt },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_rop },
+	{ 0x0198, 0x0198, nv04_graph_mthd_bind_beta1 },
+	{ 0x019c, 0x019c, nv04_graph_mthd_bind_beta4 },
+	{ 0x01a0, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
+	{ 0x03e4, 0x03e4, nv04_graph_mthd_set_operation },
 	{}
 };
 
 static struct nouveau_omthds
 nv01_graph_ifc_omthds[] = {
-	{ 0x0184, nv01_graph_mthd_bind_chroma },
-	{ 0x0188, nv01_graph_mthd_bind_clip },
-	{ 0x018c, nv01_graph_mthd_bind_patt },
-	{ 0x0190, nv04_graph_mthd_bind_rop },
-	{ 0x0194, nv04_graph_mthd_bind_beta1 },
-	{ 0x0198, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{ 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+	{ 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+	{ 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+	{ 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
 	{}
 };
 
 static struct nouveau_omthds
 nv04_graph_ifc_omthds[] = {
-	{ 0x0184, nv01_graph_mthd_bind_chroma },
-	{ 0x0188, nv01_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_patt },
-	{ 0x0190, nv04_graph_mthd_bind_rop },
-	{ 0x0194, nv04_graph_mthd_bind_beta1 },
-	{ 0x0198, nv04_graph_mthd_bind_beta4 },
-	{ 0x019c, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{ 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+	{ 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+	{ 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
+	{ 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
 	{}
 };
 
 static struct nouveau_omthds
 nv03_graph_sifc_omthds[] = {
-	{ 0x0184, nv01_graph_mthd_bind_chroma },
-	{ 0x0188, nv01_graph_mthd_bind_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{ 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+	{ 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
 	{}
 };
 
 static struct nouveau_omthds
 nv04_graph_sifc_omthds[] = {
-	{ 0x0184, nv01_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{ 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+	{ 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+	{ 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
 	{}
 };
 
 static struct nouveau_omthds
 nv03_graph_sifm_omthds[] = {
-	{ 0x0188, nv01_graph_mthd_bind_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
-	{ 0x0304, nv04_graph_mthd_set_operation },
+	{ 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+	{ 0x0304, 0x0304, nv04_graph_mthd_set_operation },
 	{}
 };
 
 static struct nouveau_omthds
 nv04_graph_sifm_omthds[] = {
-	{ 0x0188, nv04_graph_mthd_bind_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d },
-	{ 0x0304, nv04_graph_mthd_set_operation },
+	{ 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+	{ 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+	{ 0x0304, 0x0304, nv04_graph_mthd_set_operation },
 	{}
 };
 
 static struct nouveau_omthds
 nv04_graph_surf3d_omthds[] = {
-	{ 0x02f8, nv04_graph_mthd_surf3d_clip_h },
-	{ 0x02fc, nv04_graph_mthd_surf3d_clip_v },
+	{ 0x02f8, 0x02f8, nv04_graph_mthd_surf3d_clip_h },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_surf3d_clip_v },
 	{}
 };
 
 static struct nouveau_omthds
 nv03_graph_ttri_omthds[] = {
-	{ 0x0188, nv01_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_surf_color },
-	{ 0x0190, nv04_graph_mthd_bind_surf_zeta },
+	{ 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_surf_color },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_surf_zeta },
 	{}
 };
 
 static struct nouveau_omthds
 nv01_graph_prim_omthds[] = {
-	{ 0x0184, nv01_graph_mthd_bind_clip },
-	{ 0x0188, nv01_graph_mthd_bind_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{ 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
+	{ 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
 	{}
 };
 
 static struct nouveau_omthds
 nv04_graph_prim_omthds[] = {
-	{ 0x0184, nv01_graph_mthd_bind_clip },
-	{ 0x0188, nv04_graph_mthd_bind_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{ 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
+	{ 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+	{ 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
 	{}
 };
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index 92521c89e77f..5c0f843ea249 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -570,11 +570,11 @@ nv17_graph_mthd_lma_enable(struct nouveau_object *object, u32 mthd,
 
 static struct nouveau_omthds
 nv17_celcius_omthds[] = {
-	{ 0x1638, nv17_graph_mthd_lma_window },
-	{ 0x163c, nv17_graph_mthd_lma_window },
-	{ 0x1640, nv17_graph_mthd_lma_window },
-	{ 0x1644, nv17_graph_mthd_lma_window },
-	{ 0x1658, nv17_graph_mthd_lma_enable },
+	{ 0x1638, 0x1638, nv17_graph_mthd_lma_window },
+	{ 0x163c, 0x163c, nv17_graph_mthd_lma_window },
+	{ 0x1640, 0x1640, nv17_graph_mthd_lma_window },
+	{ 0x1644, 0x1644, nv17_graph_mthd_lma_window },
+	{ 0x1658, 0x1658, nv17_graph_mthd_lma_enable },
 	{}
 };
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index 8f3f619c4a78..5b20401bf911 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -183,7 +183,7 @@ nv20_graph_tile_prog(struct nouveau_engine *engine, int i)
 	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
 	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr);
 
-	if (nv_device(engine)->card_type == NV_20) {
+	if (nv_device(engine)->chipset != 0x34) {
 		nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
 		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
 		nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp);
@@ -224,14 +224,14 @@ nv20_graph_intr(struct nouveau_subdev *subdev)
 	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
 
 	if (show) {
-		nv_info(priv, "");
+		nv_error(priv, "");
 		nouveau_bitfield_print(nv10_graph_intr_name, show);
 		printk(" nsource:");
 		nouveau_bitfield_print(nv04_graph_nsource, nsource);
 		printk(" nstatus:");
 		nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
 		printk("\n");
-		nv_info(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
+		nv_error(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
 			chid, subc, class, mthd, data);
 	}
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index cc6574eeb80e..0b36dd3deebd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -216,10 +216,10 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
 
 	switch (nv_device(priv)->chipset) {
 	case 0x40:
-	case 0x41: /* guess */
+	case 0x41:
 	case 0x42:
 	case 0x43:
-	case 0x45: /* guess */
+	case 0x45:
 	case 0x4e:
 		nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
 		nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
@@ -227,6 +227,21 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
 		nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
 		nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
 		nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
+		switch (nv_device(priv)->chipset) {
+		case 0x40:
+		case 0x45:
+			nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+			nv_wr32(priv, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
+			break;
+		case 0x41:
+		case 0x42:
+		case 0x43:
+			nv_wr32(priv, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
+			nv_wr32(priv, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
+			break;
+		default:
+			break;
+		}
 		break;
 	case 0x44:
 	case 0x4a:
@@ -235,18 +250,31 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
 		nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
 		break;
 	case 0x46:
+	case 0x4c:
 	case 0x47:
 	case 0x49:
 	case 0x4b:
-	case 0x4c:
+	case 0x63:
 	case 0x67:
-	default:
+	case 0x68:
 		nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
 		nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
 		nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
 		nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
 		nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
 		nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
+		switch (nv_device(priv)->chipset) {
+		case 0x47:
+		case 0x49:
+		case 0x4b:
+			nv_wr32(priv, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
+			nv_wr32(priv, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
 		break;
 	}
 
@@ -293,7 +321,7 @@ nv40_graph_intr(struct nouveau_subdev *subdev)
 	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
 
 	if (show) {
-		nv_info(priv, "");
+		nv_error(priv, "");
 		nouveau_bitfield_print(nv10_graph_intr_name, show);
 		printk(" nsource:");
 		nouveau_bitfield_print(nv04_graph_nsource, nsource);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index ab3b9dcaf478..b1c3d835b4c2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -184,6 +184,65 @@ nv50_graph_tlb_flush(struct nouveau_engine *engine)
 	return 0;
 }
 
+static const struct nouveau_bitfield nv50_pgraph_status[] = {
+	{ 0x00000001, "BUSY" }, /* set when any bit is set */
+	{ 0x00000002, "DISPATCH" },
+	{ 0x00000004, "UNK2" },
+	{ 0x00000008, "UNK3" },
+	{ 0x00000010, "UNK4" },
+	{ 0x00000020, "UNK5" },
+	{ 0x00000040, "M2MF" },
+	{ 0x00000080, "UNK7" },
+	{ 0x00000100, "CTXPROG" },
+	{ 0x00000200, "VFETCH" },
+	{ 0x00000400, "CCACHE_UNK4" },
+	{ 0x00000800, "STRMOUT_GSCHED_UNK5" },
+	{ 0x00001000, "UNK14XX" },
+	{ 0x00002000, "UNK24XX_CSCHED" },
+	{ 0x00004000, "UNK1CXX" },
+	{ 0x00008000, "CLIPID" },
+	{ 0x00010000, "ZCULL" },
+	{ 0x00020000, "ENG2D" },
+	{ 0x00040000, "UNK34XX" },
+	{ 0x00080000, "TPRAST" },
+	{ 0x00100000, "TPROP" },
+	{ 0x00200000, "TEX" },
+	{ 0x00400000, "TPVP" },
+	{ 0x00800000, "MP" },
+	{ 0x01000000, "ROP" },
+	{}
+};
+
+static const char *const nv50_pgraph_vstatus_0[] = {
+	"VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX", NULL
+};
+
+static const char *const nv50_pgraph_vstatus_1[] = {
+	"TPRAST", "TPROP", "TEXTURE", "TPVP", "MP", NULL
+};
+
+static const char *const nv50_pgraph_vstatus_2[] = {
+	"UNK24XX", "CSCHED", "UNK1CXX", "CLIPID", "ZCULL", "ENG2D", "UNK34XX",
+	"ROP", NULL
+};
+
+static void nouveau_pgraph_vstatus_print(struct nv50_graph_priv *priv, int r,
+		const char *const units[], u32 status)
+{
+	int i;
+
+	nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status);
+
+	for (i = 0; units[i] && status; i++) {
+		if ((status & 7) == 1)
+			pr_cont(" %s", units[i]);
+		status >>= 3;
+	}
+	if (status)
+		pr_cont(" (invalid: 0x%x)", status);
+	pr_cont("\n");
+}
+
 static int
 nv84_graph_tlb_flush(struct nouveau_engine *engine)
 {
@@ -219,10 +278,19 @@ nv84_graph_tlb_flush(struct nouveau_engine *engine)
 		 !(timeout = ptimer->read(ptimer) - start > 2000000000));
 
 	if (timeout) {
-		nv_error(priv, "PGRAPH TLB flush idle timeout fail: "
-			      "0x%08x 0x%08x 0x%08x 0x%08x\n",
-			 nv_rd32(priv, 0x400700), nv_rd32(priv, 0x400380),
-			 nv_rd32(priv, 0x400384), nv_rd32(priv, 0x400388));
+		nv_error(priv, "PGRAPH TLB flush idle timeout fail\n");
+
+		tmp = nv_rd32(priv, 0x400700);
+		nv_error(priv, "PGRAPH_STATUS  : 0x%08x", tmp);
+		nouveau_bitfield_print(nv50_pgraph_status, tmp);
+		pr_cont("\n");
+
+		nouveau_pgraph_vstatus_print(priv, 0, nv50_pgraph_vstatus_0,
+				nv_rd32(priv, 0x400380));
+		nouveau_pgraph_vstatus_print(priv, 1, nv50_pgraph_vstatus_1,
+				nv_rd32(priv, 0x400384));
+		nouveau_pgraph_vstatus_print(priv, 2, nv50_pgraph_vstatus_2,
+				nv_rd32(priv, 0x400388));
 	}
 
 	nv50_vm_flush_engine(&engine->base, 0x00);
@@ -453,13 +521,13 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
 		}
 		if (ustatus) {
 			if (display)
-				nv_info(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+				nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
 		}
 		nv_wr32(priv, ustatus_addr, 0xc0000000);
 	}
 
 	if (!tps && display)
-		nv_info(priv, "%s - No TPs claiming errors?\n", name);
+		nv_warn(priv, "%s - No TPs claiming errors?\n", name);
 }
 
 static int
@@ -718,13 +786,12 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
 	nv_wr32(priv, 0x400500, 0x00010001);
 
 	if (show) {
-		nv_info(priv, "");
+		nv_error(priv, "");
 		nouveau_bitfield_print(nv50_graph_intr_name, show);
 		printk("\n");
 		nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x "
 			       "mthd 0x%04x data 0x%08x\n",
 			 chid, (u64)inst << 12, subc, class, mthd, data);
-		nv50_fb_trap(nouveau_fb(priv), 1);
 	}
 
 	if (nv_rd32(priv, 0x400824) & (1 << 31))
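
The VSTATUS registers decoded above pack one 3-bit status code per unit, and nouveau_pgraph_vstatus_print() treats code 1 as the only understood value. A self-contained sketch of the same decode, reusing the nv50_pgraph_vstatus_0 unit names (the example input value is made up):

#include <stdio.h>

static const char *const vstatus_units[] = {
	"VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX",
	NULL
};

/* One 3-bit field per unit; code 1 means the unit reports busy. */
static void vstatus_print(int r, const char *const units[], unsigned int status)
{
	int i;

	printf("PGRAPH_VSTATUS%d: 0x%08x", r, status);
	for (i = 0; units[i] && status; i++) {
		if ((status & 7) == 1)
			printf(" %s", units[i]);
		status >>= 3;
	}
	if (status)
		printf(" (invalid: 0x%x)", status);
	printf("\n");
}

int main(void)
{
	/* 0x00000009 = code 1 for VFETCH, code 1 for CCACHE */
	vstatus_print(0, vstatus_units, 0x00000009);
	return 0;
}
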
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index c62f2d0f5f0a..47a02081d708 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -814,7 +814,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
 		nv_wr32(priv, 0x41a100, 0x00000002);
 		nv_wr32(priv, 0x409100, 0x00000002);
 		if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
-			nv_info(priv, "0x409800 wait failed\n");
+			nv_warn(priv, "0x409800 wait failed\n");
 
 		nv_wr32(priv, 0x409840, 0xffffffff);
 		nv_wr32(priv, 0x409500, 0x7fffffff);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
index 9c715a25cecb..fde8e24415e4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
@@ -205,6 +205,7 @@
 #define NV20_PGRAPH_TSIZE(i)                               (0x00400908 + (i*16))
 #define NV20_PGRAPH_TSTATUS(i)                             (0x0040090C + (i*16))
 #define NV20_PGRAPH_ZCOMP(i)                               (0x00400980 + 4*(i))
+#define NV41_PGRAPH_ZCOMP0(i)                              (0x004009c0 + 4*(i))
 #define NV10_PGRAPH_TILE(i)                                (0x00400B00 + (i*16))
 #define NV10_PGRAPH_TLIMIT(i)                              (0x00400B04 + (i*16))
 #define NV10_PGRAPH_TSIZE(i)                               (0x00400B08 + (i*16))
@@ -216,6 +217,7 @@
 #define NV47_PGRAPH_TSTATUS(i)                             (0x00400D0C + (i*16))
 #define NV04_PGRAPH_V_RAM                                  0x00400D40
 #define NV04_PGRAPH_W_RAM                                  0x00400D80
+#define NV47_PGRAPH_ZCOMP0(i)                              (0x00400e00 + 4*(i))
 #define NV10_PGRAPH_COMBINER0_IN_ALPHA                     0x00400E40
 #define NV10_PGRAPH_COMBINER1_IN_ALPHA                     0x00400E44
 #define NV10_PGRAPH_COMBINER0_IN_RGB                       0x00400E48
@@ -261,9 +263,12 @@
 #define NV04_PGRAPH_DMA_B_OFFSET                           0x00401098
 #define NV04_PGRAPH_DMA_B_SIZE                             0x0040109C
 #define NV04_PGRAPH_DMA_B_Y_SIZE                           0x004010A0
+#define NV47_PGRAPH_ZCOMP1(i)                              (0x004068c0 + 4*(i))
 #define NV40_PGRAPH_TILE1(i)                               (0x00406900 + (i*16))
 #define NV40_PGRAPH_TLIMIT1(i)                             (0x00406904 + (i*16))
 #define NV40_PGRAPH_TSIZE1(i)                              (0x00406908 + (i*16))
 #define NV40_PGRAPH_TSTATUS1(i)                            (0x0040690C + (i*16))
+#define NV40_PGRAPH_ZCOMP1(i)                              (0x00406980 + 4*(i))
+#define NV41_PGRAPH_ZCOMP1(i)                              (0x004069c0 + 4*(i))
 
 #endif
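
The new ZCOMP defines follow the file's per-region indexing convention: one 32-bit slot every 4 bytes, unlike the 16-byte stride of the TILE/TLIMIT/TSIZE blocks. A quick sanity check of that arithmetic (macros copied from the header; the loop is illustrative):

#include <stdio.h>

#define NV41_PGRAPH_ZCOMP0(i)	(0x004009c0 + 4 * (i))
#define NV40_PGRAPH_TILE1(i)	(0x00406900 + (i) * 16)

int main(void)
{
	int i;

	/* ZCOMP slots advance by 4, TILE slots by 16 */
	for (i = 0; i < 3; i++)
		printf("ZCOMP0(%d)=0x%08x TILE1(%d)=0x%08x\n",
		       i, NV41_PGRAPH_ZCOMP0(i), i, NV40_PGRAPH_TILE1(i));
	return 0;
}
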
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 1f394a2629e7..9fd86375f4c4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -121,9 +121,9 @@ nv31_mpeg_ofuncs = {
 
 static struct nouveau_omthds
 nv31_mpeg_omthds[] = {
-	{ 0x0190, nv31_mpeg_mthd_dma },
-	{ 0x01a0, nv31_mpeg_mthd_dma },
-	{ 0x01b0, nv31_mpeg_mthd_dma },
+	{ 0x0190, 0x0190, nv31_mpeg_mthd_dma },
+	{ 0x01a0, 0x01a0, nv31_mpeg_mthd_dma },
+	{ 0x01b0, 0x01b0, nv31_mpeg_mthd_dma },
 	{}
 };
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
index 8678a9996d57..bc7d12b30fc1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
@@ -157,7 +157,6 @@ nv50_mpeg_intr(struct nouveau_subdev *subdev)
 
 	nv_wr32(priv, 0x00b100, stat);
 	nv_wr32(priv, 0x00b230, 0x00000001);
-	nv50_fb_trap(nouveau_fb(priv), 1);
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
index 50e7e0da1981..5a5b2a773ed7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
@@ -22,18 +22,18 @@
  * Authors: Ben Skeggs
  */
 
-#include <core/os.h>
-#include <core/class.h>
+#include <core/engine.h>
 #include <core/engctx.h>
+#include <core/class.h>
 
 #include <engine/ppp.h>
 
 struct nv98_ppp_priv {
-	struct nouveau_ppp base;
+	struct nouveau_engine base;
 };
 
 struct nv98_ppp_chan {
-	struct nouveau_ppp_chan base;
+	struct nouveau_engctx base;
 };
 
 /*******************************************************************************
@@ -49,61 +49,16 @@ nv98_ppp_sclass[] = {
  * PPPP context
  ******************************************************************************/
 
-static int
-nv98_ppp_context_ctor(struct nouveau_object *parent,
-		      struct nouveau_object *engine,
-		      struct nouveau_oclass *oclass, void *data, u32 size,
-		      struct nouveau_object **pobject)
-{
-	struct nv98_ppp_chan *priv;
-	int ret;
-
-	ret = nouveau_ppp_context_create(parent, engine, oclass, NULL,
-					 0, 0, 0, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static void
-nv98_ppp_context_dtor(struct nouveau_object *object)
-{
-	struct nv98_ppp_chan *priv = (void *)object;
-	nouveau_ppp_context_destroy(&priv->base);
-}
-
-static int
-nv98_ppp_context_init(struct nouveau_object *object)
-{
-	struct nv98_ppp_chan *priv = (void *)object;
-	int ret;
-
-	ret = nouveau_ppp_context_init(&priv->base);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int
-nv98_ppp_context_fini(struct nouveau_object *object, bool suspend)
-{
-	struct nv98_ppp_chan *priv = (void *)object;
-	return nouveau_ppp_context_fini(&priv->base, suspend);
-}
-
 static struct nouveau_oclass
 nv98_ppp_cclass = {
 	.handle = NV_ENGCTX(PPP, 0x98),
 	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv98_ppp_context_ctor,
-		.dtor = nv98_ppp_context_dtor,
-		.init = nv98_ppp_context_init,
-		.fini = nv98_ppp_context_fini,
-		.rd32 = _nouveau_ppp_context_rd32,
-		.wr32 = _nouveau_ppp_context_wr32,
+		.ctor = _nouveau_engctx_ctor,
+		.dtor = _nouveau_engctx_dtor,
+		.init = _nouveau_engctx_init,
+		.fini = _nouveau_engctx_fini,
+		.rd32 = _nouveau_engctx_rd32,
+		.wr32 = _nouveau_engctx_wr32,
 	},
 };
 
@@ -111,11 +66,6 @@ nv98_ppp_cclass = {
  * PPPP engine/subdev functions
  ******************************************************************************/
 
-static void
-nv98_ppp_intr(struct nouveau_subdev *subdev)
-{
-}
-
 static int
 nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	      struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +74,25 @@ nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv98_ppp_priv *priv;
 	int ret;
 
-	ret = nouveau_ppp_create(parent, engine, oclass, &priv);
+	ret = nouveau_engine_create(parent, engine, oclass, true,
+				    "PPPP", "ppp", &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
 	nv_subdev(priv)->unit = 0x00400002;
-	nv_subdev(priv)->intr = nv98_ppp_intr;
 	nv_engine(priv)->cclass = &nv98_ppp_cclass;
 	nv_engine(priv)->sclass = nv98_ppp_sclass;
 	return 0;
 }
 
-static void
-nv98_ppp_dtor(struct nouveau_object *object)
-{
-	struct nv98_ppp_priv *priv = (void *)object;
-	nouveau_ppp_destroy(&priv->base);
-}
-
-static int
-nv98_ppp_init(struct nouveau_object *object)
-{
-	struct nv98_ppp_priv *priv = (void *)object;
-	int ret;
-
-	ret = nouveau_ppp_init(&priv->base);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int
-nv98_ppp_fini(struct nouveau_object *object, bool suspend)
-{
-	struct nv98_ppp_priv *priv = (void *)object;
-	return nouveau_ppp_fini(&priv->base, suspend);
-}
-
 struct nouveau_oclass
 nv98_ppp_oclass = {
 	.handle = NV_ENGINE(PPP, 0x98),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv98_ppp_ctor,
-		.dtor = nv98_ppp_dtor,
-		.init = nv98_ppp_init,
-		.fini = nv98_ppp_fini,
+		.dtor = _nouveau_engine_dtor,
+		.init = _nouveau_engine_init,
+		.fini = _nouveau_engine_fini,
 	},
 };
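
The rewrite above is pure boilerplate removal: every hook this engine defined was a pass-through to the base class, so the oclass tables can point straight at the shared _nouveau_engctx_*/_nouveau_engine_* defaults. A toy model of that vtable-of-defaults shape (all names below are invented for illustration; only the pattern matches the patch):

#include <stdio.h>

struct obj;

struct ofuncs {
	int (*init)(struct obj *);
	int (*fini)(struct obj *, int suspend);
};

struct obj {
	const struct ofuncs *ofuncs;
	const char *name;
};

/* Shared defaults, analogous to _nouveau_engine_init()/_fini(). */
static int default_init(struct obj *o)
{
	printf("%s: generic init\n", o->name);
	return 0;
}

static int default_fini(struct obj *o, int suspend)
{
	printf("%s: generic fini (suspend=%d)\n", o->name, suspend);
	return 0;
}

/* An engine with no special needs reuses the defaults, which is
 * exactly what nv98_ppp_oclass does after this patch. */
static const struct ofuncs ppp_ofuncs = {
	.init = default_init,
	.fini = default_fini,
};

int main(void)
{
	struct obj ppp = { &ppp_ofuncs, "PPPP" };

	ppp.ofuncs->init(&ppp);
	return ppp.ofuncs->fini(&ppp, 0);
}
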
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
new file mode 100644
index 000000000000..ebf0d860e2dd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/ppp.h>
+
+struct nvc0_ppp_priv {
+	struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * PPP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_ppp_sclass[] = {
+	{ 0x90b3, &nouveau_object_ofuncs },
+	{},
+};
+
+/*******************************************************************************
+ * PPPP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_ppp_cclass = {
+	.handle = NV_ENGCTX(PPP, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_falcon_context_ctor,
+		.dtor = _nouveau_falcon_context_dtor,
+		.init = _nouveau_falcon_context_init,
+		.fini = _nouveau_falcon_context_fini,
+		.rd32 = _nouveau_falcon_context_rd32,
+		.wr32 = _nouveau_falcon_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PPPP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_ppp_init(struct nouveau_object *object)
+{
+	struct nvc0_ppp_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_falcon_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x086010, 0x0000fff2);
+	nv_wr32(priv, 0x08601c, 0x0000fff2);
+	return 0;
+}
+
+static int
+nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nvc0_ppp_priv *priv;
+	int ret;
+
+	ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true,
+				    "PPPP", "ppp", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000002;
+	nv_engine(priv)->cclass = &nvc0_ppp_cclass;
+	nv_engine(priv)->sclass = nvc0_ppp_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_ppp_oclass = {
+	.handle = NV_ENGINE(PPP, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_ppp_ctor,
+		.dtor = _nouveau_falcon_dtor,
+		.init = nvc0_ppp_init,
+		.fini = _nouveau_falcon_fini,
+		.rd32 = _nouveau_falcon_rd32,
+		.wr32 = _nouveau_falcon_wr32,
+	},
+};
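
One plausible reading of the falcon defaults used above is that each falcon instance owns a register window at a fixed base (0x086000 here, passed to nouveau_falcon_create), which shared accessors can address relative to that base. A sketch of such a windowed-accessor pattern; the mmio array and helper names are made up, and whether the in-tree rd32/wr32 hooks are in fact base-relative is an assumption:

#include <stdio.h>
#include <stdint.h>

struct falcon {
	uint32_t addr;		/* MMIO base of this falcon's window */
};

static uint32_t mmio[0x100000 / 4];	/* fake BAR0 backing store */

static uint32_t falcon_rd32(struct falcon *f, uint32_t offset)
{
	return mmio[(f->addr + offset) / 4];
}

static void falcon_wr32(struct falcon *f, uint32_t offset, uint32_t data)
{
	mmio[(f->addr + offset) / 4] = data;
}

int main(void)
{
	struct falcon ppp = { 0x086000 };

	/* 0x086000 + 0x10: the register nvc0_ppp_init() pokes */
	falcon_wr32(&ppp, 0x010, 0x0000fff2);
	printf("0x086010 = 0x%08x\n", falcon_rd32(&ppp, 0x010));
	return 0;
}
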
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
index 3ca4c3aa90b7..2a859a31c30d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -63,8 +63,8 @@ nv04_software_flip(struct nouveau_object *object, u32 mthd,
 
 static struct nouveau_omthds
 nv04_software_omthds[] = {
-	{ 0x0150, nv04_software_set_ref },
-	{ 0x0500, nv04_software_flip },
+	{ 0x0150, 0x0150, nv04_software_set_ref },
+	{ 0x0500, 0x0500, nv04_software_flip },
 	{}
 };
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
index 6e699afbfdb7..a019364b1e13 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -52,7 +52,7 @@ nv10_software_flip(struct nouveau_object *object, u32 mthd,
 
 static struct nouveau_omthds
 nv10_software_omthds[] = {
-	{ 0x0500, nv10_software_flip },
+	{ 0x0500, 0x0500, nv10_software_flip },
 	{}
 };
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index a2edcd38544a..b0e7e1c01ce6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -117,11 +117,11 @@ nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
 
 static struct nouveau_omthds
 nv50_software_omthds[] = {
-	{ 0x018c, nv50_software_mthd_dma_vblsem },
-	{ 0x0400, nv50_software_mthd_vblsem_offset },
-	{ 0x0404, nv50_software_mthd_vblsem_value },
-	{ 0x0408, nv50_software_mthd_vblsem_release },
-	{ 0x0500, nv50_software_mthd_flip },
+	{ 0x018c, 0x018c, nv50_software_mthd_dma_vblsem },
+	{ 0x0400, 0x0400, nv50_software_mthd_vblsem_offset },
+	{ 0x0404, 0x0404, nv50_software_mthd_vblsem_value },
+	{ 0x0408, 0x0408, nv50_software_mthd_vblsem_release },
+	{ 0x0500, 0x0500, nv50_software_mthd_flip },
 	{}
 };
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index b7b0d7e330d6..282a1cd1bc2f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -99,11 +99,11 @@ nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
 
 static struct nouveau_omthds
 nvc0_software_omthds[] = {
-	{ 0x0400, nvc0_software_mthd_vblsem_offset },
-	{ 0x0404, nvc0_software_mthd_vblsem_offset },
-	{ 0x0408, nvc0_software_mthd_vblsem_value },
-	{ 0x040c, nvc0_software_mthd_vblsem_release },
-	{ 0x0500, nvc0_software_mthd_flip },
+	{ 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
+	{ 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset },
+	{ 0x0408, 0x0408, nvc0_software_mthd_vblsem_value },
+	{ 0x040c, 0x040c, nvc0_software_mthd_vblsem_release },
+	{ 0x0500, 0x0500, nvc0_software_mthd_flip },
 	{}
 };
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
index dd23c80e5405..261cd96e6951 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
@@ -22,18 +22,13 @@
  * Authors: Ben Skeggs
  */
 
-#include <core/os.h>
-#include <core/class.h>
 #include <core/engctx.h>
+#include <core/class.h>
 
 #include <engine/vp.h>
 
 struct nv84_vp_priv {
-	struct nouveau_vp base;
-};
-
-struct nv84_vp_chan {
-	struct nouveau_vp_chan base;
+	struct nouveau_engine base;
 };
 
 /*******************************************************************************
@@ -49,61 +44,16 @@ nv84_vp_sclass[] = {
  * PVP context
  ******************************************************************************/
 
-static int
-nv84_vp_context_ctor(struct nouveau_object *parent,
-		     struct nouveau_object *engine,
-		     struct nouveau_oclass *oclass, void *data, u32 size,
-		     struct nouveau_object **pobject)
-{
-	struct nv84_vp_chan *priv;
-	int ret;
-
-	ret = nouveau_vp_context_create(parent, engine, oclass, NULL,
-					0, 0, 0, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static void
-nv84_vp_context_dtor(struct nouveau_object *object)
-{
-	struct nv84_vp_chan *priv = (void *)object;
-	nouveau_vp_context_destroy(&priv->base);
-}
-
-static int
-nv84_vp_context_init(struct nouveau_object *object)
-{
-	struct nv84_vp_chan *priv = (void *)object;
-	int ret;
-
-	ret = nouveau_vp_context_init(&priv->base);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int
-nv84_vp_context_fini(struct nouveau_object *object, bool suspend)
-{
-	struct nv84_vp_chan *priv = (void *)object;
-	return nouveau_vp_context_fini(&priv->base, suspend);
-}
-
 static struct nouveau_oclass
 nv84_vp_cclass = {
 	.handle = NV_ENGCTX(VP, 0x84),
 	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv84_vp_context_ctor,
-		.dtor = nv84_vp_context_dtor,
-		.init = nv84_vp_context_init,
-		.fini = nv84_vp_context_fini,
-		.rd32 = _nouveau_vp_context_rd32,
-		.wr32 = _nouveau_vp_context_wr32,
+		.ctor = _nouveau_engctx_ctor,
+		.dtor = _nouveau_engctx_dtor,
+		.init = _nouveau_engctx_init,
+		.fini = _nouveau_engctx_fini,
+		.rd32 = _nouveau_engctx_rd32,
+		.wr32 = _nouveau_engctx_wr32,
 	},
 };
 
@@ -111,11 +61,6 @@ nv84_vp_cclass = {
  * PVP engine/subdev functions
  ******************************************************************************/
 
-static void
-nv84_vp_intr(struct nouveau_subdev *subdev)
-{
-}
-
 static int
 nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +69,25 @@ nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv84_vp_priv *priv;
 	int ret;
 
-	ret = nouveau_vp_create(parent, engine, oclass, &priv);
+	ret = nouveau_engine_create(parent, engine, oclass, true,
+				    "PVP", "vp", &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
 	nv_subdev(priv)->unit = 0x01020000;
-	nv_subdev(priv)->intr = nv84_vp_intr;
 	nv_engine(priv)->cclass = &nv84_vp_cclass;
 	nv_engine(priv)->sclass = nv84_vp_sclass;
 	return 0;
 }
 
-static void
-nv84_vp_dtor(struct nouveau_object *object)
-{
-	struct nv84_vp_priv *priv = (void *)object;
-	nouveau_vp_destroy(&priv->base);
-}
-
-static int
-nv84_vp_init(struct nouveau_object *object)
-{
-	struct nv84_vp_priv *priv = (void *)object;
-	int ret;
-
-	ret = nouveau_vp_init(&priv->base);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int
-nv84_vp_fini(struct nouveau_object *object, bool suspend)
-{
-	struct nv84_vp_priv *priv = (void *)object;
-	return nouveau_vp_fini(&priv->base, suspend);
-}
-
 struct nouveau_oclass
 nv84_vp_oclass = {
 	.handle = NV_ENGINE(VP, 0x84),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv84_vp_ctor,
-		.dtor = nv84_vp_dtor,
-		.init = nv84_vp_init,
-		.fini = nv84_vp_fini,
+		.dtor = _nouveau_engine_dtor,
+		.init = _nouveau_engine_init,
+		.fini = _nouveau_engine_fini,
 	},
 };
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
new file mode 100644
index 000000000000..f761949d7039
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/vp.h>
+
+struct nvc0_vp_priv {
+	struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_vp_sclass[] = {
+	{ 0x90b2, &nouveau_object_ofuncs },
+	{},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_vp_cclass = {
+	.handle = NV_ENGCTX(VP, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_falcon_context_ctor,
+		.dtor = _nouveau_falcon_context_dtor,
+		.init = _nouveau_falcon_context_init,
+		.fini = _nouveau_falcon_context_fini,
+		.rd32 = _nouveau_falcon_context_rd32,
+		.wr32 = _nouveau_falcon_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_vp_init(struct nouveau_object *object)
+{
+	struct nvc0_vp_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_falcon_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x085010, 0x0000fff2);
+	nv_wr32(priv, 0x08501c, 0x0000fff2);
+	return 0;
+}
+
+static int
+nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nvc0_vp_priv *priv;
+	int ret;
+
+	ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
+				    "PVP", "vp", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00020000;
+	nv_engine(priv)->cclass = &nvc0_vp_cclass;
+	nv_engine(priv)->sclass = nvc0_vp_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_vp_oclass = {
+	.handle = NV_ENGINE(VP, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_vp_ctor,
+		.dtor = _nouveau_falcon_dtor,
+		.init = nvc0_vp_init,
+		.fini = _nouveau_falcon_fini,
+		.rd32 = _nouveau_falcon_rd32,
+		.wr32 = _nouveau_falcon_wr32,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
new file mode 100644
index 000000000000..2384ce5dbe16
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/falcon.h>
+
+#include <engine/vp.h>
+
+struct nve0_vp_priv {
+	struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_vp_sclass[] = {
+	{ 0x95b2, &nouveau_object_ofuncs },
+	{},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_vp_cclass = {
+	.handle = NV_ENGCTX(VP, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_falcon_context_ctor,
+		.dtor = _nouveau_falcon_context_dtor,
+		.init = _nouveau_falcon_context_init,
+		.fini = _nouveau_falcon_context_fini,
+		.rd32 = _nouveau_falcon_context_rd32,
+		.wr32 = _nouveau_falcon_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_vp_init(struct nouveau_object *object)
+{
+	struct nve0_vp_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_falcon_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x085010, 0x0000fff2);
+	nv_wr32(priv, 0x08501c, 0x0000fff2);
+	return 0;
+}
+
+static int
+nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nve0_vp_priv *priv;
+	int ret;
+
+	ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
+				    "PVP", "vp", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00020000;
+	nv_engine(priv)->cclass = &nve0_vp_cclass;
+	nv_engine(priv)->sclass = nve0_vp_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_vp_oclass = {
+	.handle = NV_ENGINE(VP, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_vp_ctor,
+		.dtor = _nouveau_falcon_dtor,
+		.init = nve0_vp_init,
+		.fini = _nouveau_falcon_fini,
+		.rd32 = _nouveau_falcon_rd32,
+		.wr32 = _nouveau_falcon_wr32,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 6180ae9800fc..47c4b3a5bd3a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -23,6 +23,7 @@
 #define NV_DEVICE_DISABLE_COPY0                           0x0000008000000000ULL
 #define NV_DEVICE_DISABLE_COPY1                           0x0000010000000000ULL
 #define NV_DEVICE_DISABLE_UNK1C1                          0x0000020000000000ULL
+#define NV_DEVICE_DISABLE_VENC                            0x0000040000000000ULL
 
 struct nv_device_class {
 	u64 device;	/* device identifier, ~0 for client default */
@@ -52,11 +53,49 @@ struct nv_device_class {
 #define NV_DMA_ACCESS_WR                                             0x00000200
 #define NV_DMA_ACCESS_RDWR                                           0x00000300
 
+/* NV50:NVC0 */
+#define NV50_DMA_CONF0_ENABLE                                        0x80000000
+#define NV50_DMA_CONF0_PRIV                                          0x00300000
+#define NV50_DMA_CONF0_PRIV_VM                                       0x00000000
+#define NV50_DMA_CONF0_PRIV_US                                       0x00100000
+#define NV50_DMA_CONF0_PRIV__S                                       0x00200000
+#define NV50_DMA_CONF0_PART                                          0x00030000
+#define NV50_DMA_CONF0_PART_VM                                       0x00000000
+#define NV50_DMA_CONF0_PART_256                                      0x00010000
+#define NV50_DMA_CONF0_PART_1KB                                      0x00020000
+#define NV50_DMA_CONF0_COMP                                          0x00000180
+#define NV50_DMA_CONF0_COMP_NONE                                     0x00000000
+#define NV50_DMA_CONF0_COMP_VM                                       0x00000180
+#define NV50_DMA_CONF0_TYPE                                          0x0000007f
+#define NV50_DMA_CONF0_TYPE_LINEAR                                   0x00000000
+#define NV50_DMA_CONF0_TYPE_VM                                       0x0000007f
+
+/* NVC0:NVD9 */
+#define NVC0_DMA_CONF0_ENABLE                                        0x80000000
+#define NVC0_DMA_CONF0_PRIV                                          0x00300000
+#define NVC0_DMA_CONF0_PRIV_VM                                       0x00000000
+#define NVC0_DMA_CONF0_PRIV_US                                       0x00100000
+#define NVC0_DMA_CONF0_PRIV__S                                       0x00200000
+#define NVC0_DMA_CONF0_UNKN /* PART? */                              0x00030000
+#define NVC0_DMA_CONF0_TYPE                                          0x000000ff
+#define NVC0_DMA_CONF0_TYPE_LINEAR                                   0x00000000
+#define NVC0_DMA_CONF0_TYPE_VM                                       0x000000ff
+
+/* NVD9- */
+#define NVD0_DMA_CONF0_ENABLE                                        0x80000000
+#define NVD0_DMA_CONF0_PAGE                                          0x00000400
+#define NVD0_DMA_CONF0_PAGE_LP                                       0x00000000
+#define NVD0_DMA_CONF0_PAGE_SP                                       0x00000400
+#define NVD0_DMA_CONF0_TYPE                                          0x000000ff
+#define NVD0_DMA_CONF0_TYPE_LINEAR                                   0x00000000
+#define NVD0_DMA_CONF0_TYPE_VM                                       0x000000ff
+
 struct nv_dma_class {
 	u32 flags;
 	u32 pad0;
 	u64 start;
 	u64 limit;
+	u32 conf0;
 };
 
 /* DMA FIFO channel classes
@@ -115,4 +154,190 @@ struct nve0_channel_ind_class {
 	u32 engine;
 };
 
+/* 5070: NV50_DISP
+ * 8270: NV84_DISP
+ * 8370: NVA0_DISP
+ * 8870: NV94_DISP
+ * 8570: NVA3_DISP
+ * 9070: NVD0_DISP
+ * 9170: NVE0_DISP
+ */
+
+#define NV50_DISP_CLASS                                              0x00005070
+#define NV84_DISP_CLASS                                              0x00008270
+#define NVA0_DISP_CLASS                                              0x00008370
+#define NV94_DISP_CLASS                                              0x00008870
+#define NVA3_DISP_CLASS                                              0x00008570
+#define NVD0_DISP_CLASS                                              0x00009070
+#define NVE0_DISP_CLASS                                              0x00009170
+
+#define NV50_DISP_SOR_MTHD                                           0x00010000
+#define NV50_DISP_SOR_MTHD_TYPE                                      0x0000f000
+#define NV50_DISP_SOR_MTHD_HEAD                                      0x00000018
+#define NV50_DISP_SOR_MTHD_LINK                                      0x00000004
+#define NV50_DISP_SOR_MTHD_OR                                        0x00000003
+
+#define NV50_DISP_SOR_PWR                                            0x00010000
+#define NV50_DISP_SOR_PWR_STATE                                      0x00000001
+#define NV50_DISP_SOR_PWR_STATE_ON                                   0x00000001
+#define NV50_DISP_SOR_PWR_STATE_OFF                                  0x00000000
+#define NVA3_DISP_SOR_HDA_ELD                                        0x00010100
+#define NV84_DISP_SOR_HDMI_PWR                                       0x00012000
+#define NV84_DISP_SOR_HDMI_PWR_STATE                                 0x40000000
+#define NV84_DISP_SOR_HDMI_PWR_STATE_OFF                             0x00000000
+#define NV84_DISP_SOR_HDMI_PWR_STATE_ON                              0x40000000
+#define NV84_DISP_SOR_HDMI_PWR_MAX_AC_PACKET                         0x001f0000
+#define NV84_DISP_SOR_HDMI_PWR_REKEY                                 0x0000007f
+#define NV50_DISP_SOR_LVDS_SCRIPT                                    0x00013000
+#define NV50_DISP_SOR_LVDS_SCRIPT_ID                                 0x0000ffff
+#define NV94_DISP_SOR_DP_TRAIN                                       0x00016000
+#define NV94_DISP_SOR_DP_TRAIN_OP                                    0xf0000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_PATTERN                            0x00000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_INIT                               0x10000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_FINI                               0x20000000
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD                           0x00000001
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF                       0x00000000
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON                        0x00000001
+#define NV94_DISP_SOR_DP_TRAIN_PATTERN                               0x00000003
+#define NV94_DISP_SOR_DP_TRAIN_PATTERN_DISABLED                      0x00000000
+#define NV94_DISP_SOR_DP_LNKCTL                                      0x00016040
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME                                0x80000000
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME_STD                            0x00000000
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH                            0x80000000
+#define NV94_DISP_SOR_DP_LNKCTL_WIDTH                                0x00001f00
+#define NV94_DISP_SOR_DP_LNKCTL_COUNT                                0x00000007
+#define NV94_DISP_SOR_DP_DRVCTL(l)                     ((l) * 0x40 + 0x00016100)
+#define NV94_DISP_SOR_DP_DRVCTL_VS                                   0x00000300
+#define NV94_DISP_SOR_DP_DRVCTL_PE                                   0x00000003
+
+#define NV50_DISP_DAC_MTHD                                           0x00020000
+#define NV50_DISP_DAC_MTHD_TYPE                                      0x0000f000
+#define NV50_DISP_DAC_MTHD_OR                                        0x00000003
+
+#define NV50_DISP_DAC_PWR                                            0x00020000
+#define NV50_DISP_DAC_PWR_HSYNC                                      0x00000001
+#define NV50_DISP_DAC_PWR_HSYNC_ON                                   0x00000000
+#define NV50_DISP_DAC_PWR_HSYNC_LO                                   0x00000001
+#define NV50_DISP_DAC_PWR_VSYNC                                      0x00000004
+#define NV50_DISP_DAC_PWR_VSYNC_ON                                   0x00000000
+#define NV50_DISP_DAC_PWR_VSYNC_LO                                   0x00000004
+#define NV50_DISP_DAC_PWR_DATA                                       0x00000010
+#define NV50_DISP_DAC_PWR_DATA_ON                                    0x00000000
+#define NV50_DISP_DAC_PWR_DATA_LO                                    0x00000010
+#define NV50_DISP_DAC_PWR_STATE                                      0x00000040
+#define NV50_DISP_DAC_PWR_STATE_ON                                   0x00000000
+#define NV50_DISP_DAC_PWR_STATE_OFF                                  0x00000040
+#define NV50_DISP_DAC_LOAD                                           0x0002000c
+#define NV50_DISP_DAC_LOAD_VALUE                                     0x00000007
+
+struct nv50_display_class {
+};
+
+/* 507a: NV50_DISP_CURS
+ * 827a: NV84_DISP_CURS
+ * 837a: NVA0_DISP_CURS
+ * 887a: NV94_DISP_CURS
+ * 857a: NVA3_DISP_CURS
+ * 907a: NVD0_DISP_CURS
+ * 917a: NVE0_DISP_CURS
+ */
+
+#define NV50_DISP_CURS_CLASS                                         0x0000507a
+#define NV84_DISP_CURS_CLASS                                         0x0000827a
+#define NVA0_DISP_CURS_CLASS                                         0x0000837a
+#define NV94_DISP_CURS_CLASS                                         0x0000887a
+#define NVA3_DISP_CURS_CLASS                                         0x0000857a
+#define NVD0_DISP_CURS_CLASS                                         0x0000907a
+#define NVE0_DISP_CURS_CLASS                                         0x0000917a
+
+struct nv50_display_curs_class {
+	u32 head;
+};
+
+/* 507b: NV50_DISP_OIMM
+ * 827b: NV84_DISP_OIMM
+ * 837b: NVA0_DISP_OIMM
+ * 887b: NV94_DISP_OIMM
+ * 857b: NVA3_DISP_OIMM
+ * 907b: NVD0_DISP_OIMM
+ * 917b: NVE0_DISP_OIMM
+ */
+
+#define NV50_DISP_OIMM_CLASS                                         0x0000507b
+#define NV84_DISP_OIMM_CLASS                                         0x0000827b
+#define NVA0_DISP_OIMM_CLASS                                         0x0000837b
+#define NV94_DISP_OIMM_CLASS                                         0x0000887b
+#define NVA3_DISP_OIMM_CLASS                                         0x0000857b
+#define NVD0_DISP_OIMM_CLASS                                         0x0000907b
+#define NVE0_DISP_OIMM_CLASS                                         0x0000917b
+
+struct nv50_display_oimm_class {
+	u32 head;
+};
+
+/* 507c: NV50_DISP_SYNC
+ * 827c: NV84_DISP_SYNC
+ * 837c: NVA0_DISP_SYNC
+ * 887c: NV94_DISP_SYNC
+ * 857c: NVA3_DISP_SYNC
+ * 907c: NVD0_DISP_SYNC
+ * 917c: NVE0_DISP_SYNC
+ */
+
+#define NV50_DISP_SYNC_CLASS                                         0x0000507c
+#define NV84_DISP_SYNC_CLASS                                         0x0000827c
+#define NVA0_DISP_SYNC_CLASS                                         0x0000837c
+#define NV94_DISP_SYNC_CLASS                                         0x0000887c
+#define NVA3_DISP_SYNC_CLASS                                         0x0000857c
+#define NVD0_DISP_SYNC_CLASS                                         0x0000907c
+#define NVE0_DISP_SYNC_CLASS                                         0x0000917c
+
+struct nv50_display_sync_class {
+	u32 pushbuf;
+	u32 head;
+};
+
+/* 507d: NV50_DISP_MAST
+ * 827d: NV84_DISP_MAST
+ * 837d: NVA0_DISP_MAST
+ * 887d: NV94_DISP_MAST
+ * 857d: NVA3_DISP_MAST
+ * 907d: NVD0_DISP_MAST
+ * 917d: NVE0_DISP_MAST
+ */
+
+#define NV50_DISP_MAST_CLASS                                         0x0000507d
+#define NV84_DISP_MAST_CLASS                                         0x0000827d
+#define NVA0_DISP_MAST_CLASS                                         0x0000837d
+#define NV94_DISP_MAST_CLASS                                         0x0000887d
+#define NVA3_DISP_MAST_CLASS                                         0x0000857d
+#define NVD0_DISP_MAST_CLASS                                         0x0000907d
+#define NVE0_DISP_MAST_CLASS                                         0x0000917d
+
+struct nv50_display_mast_class {
+	u32 pushbuf;
+};
+
+/* 507e: NV50_DISP_OVLY
+ * 827e: NV84_DISP_OVLY
+ * 837e: NVA0_DISP_OVLY
+ * 887e: NV94_DISP_OVLY
+ * 857e: NVA3_DISP_OVLY
+ * 907e: NVD0_DISP_OVLY
+ * 917e: NVE0_DISP_OVLY
+ */
+
+#define NV50_DISP_OVLY_CLASS                                         0x0000507e
+#define NV84_DISP_OVLY_CLASS                                         0x0000827e
+#define NVA0_DISP_OVLY_CLASS                                         0x0000837e
+#define NV94_DISP_OVLY_CLASS                                         0x0000887e
+#define NVA3_DISP_OVLY_CLASS                                         0x0000857e
+#define NVD0_DISP_OVLY_CLASS                                         0x0000907e
+#define NVE0_DISP_OVLY_CLASS                                         0x0000917e
+
+struct nv50_display_ovly_class {
+	u32 pushbuf;
+	u32 head;
+};
+
 #endif
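
The SOR method defines above encode routing in the low bits of the method token: TYPE in bits 15:12, HEAD in 4:3, LINK in bit 2, OR in 1:0. A sketch composing a token from those fields (the masks come from the header; the helper and shift positions are inferred from the masks, not taken from in-tree code):

#include <stdio.h>

#define NV50_DISP_SOR_MTHD       0x00010000
#define NV50_DISP_SOR_MTHD_TYPE  0x0000f000
#define NV50_DISP_SOR_MTHD_HEAD  0x00000018
#define NV50_DISP_SOR_MTHD_LINK  0x00000004
#define NV50_DISP_SOR_MTHD_OR    0x00000003

static unsigned int sor_mthd(unsigned int type, unsigned int head,
			     unsigned int link, unsigned int or)
{
	return NV50_DISP_SOR_MTHD |
	       (type << 12) |	/* _TYPE field, bits 15:12 */
	       (head <<  3) |	/* _HEAD field, bits 4:3 */
	       (link <<  2) |	/* _LINK field, bit 2 */
	       (or   <<  0);	/* _OR field, bits 1:0 */
}

int main(void)
{
	/* e.g. an HDMI power method (type 2 -> 0x12000) on head 0, OR 1 */
	printf("mthd = 0x%08x\n", sor_mthd(2, 0, 0, 1));
	return 0;
}
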
diff --git a/drivers/gpu/drm/nouveau/core/include/core/engctx.h b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
index 8a947b6872eb..2fd48b564c7d 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/engctx.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
@@ -39,6 +39,9 @@ void nouveau_engctx_destroy(struct nouveau_engctx *);
 int  nouveau_engctx_init(struct nouveau_engctx *);
 int  nouveau_engctx_fini(struct nouveau_engctx *, bool suspend);
 
+int  _nouveau_engctx_ctor(struct nouveau_object *, struct nouveau_object *,
+			  struct nouveau_oclass *, void *, u32,
+			  struct nouveau_object **);
 void _nouveau_engctx_dtor(struct nouveau_object *);
 int  _nouveau_engctx_init(struct nouveau_object *);
 int  _nouveau_engctx_fini(struct nouveau_object *, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/falcon.h b/drivers/gpu/drm/nouveau/core/include/core/falcon.h
new file mode 100644
index 000000000000..1edec386ab36
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/falcon.h
@@ -0,0 +1,81 @@
+#ifndef __NOUVEAU_FALCON_H__
+#define __NOUVEAU_FALCON_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+#include <core/gpuobj.h>
+
+struct nouveau_falcon_chan {
+	struct nouveau_engctx base;
+};
+
+#define nouveau_falcon_context_create(p,e,c,g,s,a,f,d)                         \
+	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_falcon_context_destroy(d)                                      \
+	nouveau_engctx_destroy(&(d)->base)
+#define nouveau_falcon_context_init(d)                                         \
+	nouveau_engctx_init(&(d)->base)
+#define nouveau_falcon_context_fini(d,s)                                       \
+	nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_falcon_context_ctor _nouveau_engctx_ctor
+#define _nouveau_falcon_context_dtor _nouveau_engctx_dtor
+#define _nouveau_falcon_context_init _nouveau_engctx_init
+#define _nouveau_falcon_context_fini _nouveau_engctx_fini
+#define _nouveau_falcon_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_falcon_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_falcon_data {
+	bool external;
+};
+
+struct nouveau_falcon {
+	struct nouveau_engine base;
+
+	u32 addr;
+	u8  version;
+	u8  secret;
+
+	struct nouveau_gpuobj *core;
+	bool external;
+
+	struct {
+		u32 limit;
+		u32 *data;
+		u32  size;
+	} code;
+
+	struct {
+		u32 limit;
+		u32 *data;
+		u32  size;
+	} data;
+};
+
+#define nv_falcon(priv) (&(priv)->base)
+
+#define nouveau_falcon_create(p,e,c,b,d,i,f,r)                                 \
+	nouveau_falcon_create_((p), (e), (c), (b), (d), (i), (f),              \
+			       sizeof(**r),(void **)r)
+#define nouveau_falcon_destroy(p)                                              \
+	nouveau_engine_destroy(&(p)->base)
+#define nouveau_falcon_init(p) ({                                              \
+	struct nouveau_falcon *falcon = (p);                                   \
+	_nouveau_falcon_init(nv_object(falcon));                               \
+})
+#define nouveau_falcon_fini(p,s) ({                                            \
+	struct nouveau_falcon *falcon = (p);                                   \
+	_nouveau_falcon_fini(nv_object(falcon), (s));                          \
+})
+
+int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *,
+			   struct nouveau_oclass *, u32, bool, const char *,
+			   const char *, int, void **);
+
+#define _nouveau_falcon_dtor _nouveau_engine_dtor
+int  _nouveau_falcon_init(struct nouveau_object *);
+int  _nouveau_falcon_fini(struct nouveau_object *, bool);
+u32  _nouveau_falcon_rd32(struct nouveau_object *, u64);
+void _nouveau_falcon_wr32(struct nouveau_object *, u64, u32);
+
+#endif
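
nouveau_falcon_init()/_fini() above are GCC statement-expression macros: the temporary forces the argument to convert to struct nouveau_falcon *, so a mismatched pointer type fails at compile time instead of being silently cast. The same trick in isolation (names invented; requires GCC/Clang for the ({ }) extension):

#include <stdio.h>

struct widget {
	int id;
};

static int widget_init(struct widget *w)
{
	printf("init widget %d\n", w->id);
	return 0;
}

/* The temporary pins 'p' to struct widget *; any other pointer type
 * triggers a diagnostic at the assignment. */
#define WIDGET_INIT(p) ({                \
	struct widget *_w = (p);         \
	widget_init(_w);                 \
})

int main(void)
{
	struct widget w = { 42 };
	return WIDGET_INIT(&w);
}
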
diff --git a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
index 6eaff79377ae..b3b9ce4e9d38 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
@@ -65,7 +65,7 @@ nouveau_gpuobj_ref(struct nouveau_gpuobj *obj, struct nouveau_gpuobj **ref)
 void _nouveau_gpuobj_dtor(struct nouveau_object *);
 int  _nouveau_gpuobj_init(struct nouveau_object *);
 int  _nouveau_gpuobj_fini(struct nouveau_object *, bool);
-u32  _nouveau_gpuobj_rd32(struct nouveau_object *, u32);
-void _nouveau_gpuobj_wr32(struct nouveau_object *, u32, u32);
+u32  _nouveau_gpuobj_rd32(struct nouveau_object *, u64);
+void _nouveau_gpuobj_wr32(struct nouveau_object *, u64, u32);
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/mm.h b/drivers/gpu/drm/nouveau/core/include/core/mm.h
index 975137ba34a6..2514e81ade02 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/mm.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/mm.h
@@ -21,6 +21,12 @@ struct nouveau_mm {
 	int heap_nodes;
 };
 
+static inline bool
+nouveau_mm_initialised(struct nouveau_mm *mm)
+{
+	return mm->block_size != 0;
+}
+
 int  nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
 int  nouveau_mm_fini(struct nouveau_mm *);
 int  nouveau_mm_head(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min,
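
nouveau_mm_initialised() leans on a sentinel: a zeroed nouveau_mm is uninitialised by construction, because init always records a non-zero block size. A stand-alone illustration of the idiom (the struct layout below is a stand-in, not nouveau's):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct mm {
	unsigned int block_size;
};

/* Zeroed storage reads as "not initialised"; no extra flag needed. */
static inline bool mm_initialised(struct mm *mm)
{
	return mm->block_size != 0;
}

int main(void)
{
	struct mm mm;

	memset(&mm, 0, sizeof(mm));
	printf("before init: %d\n", mm_initialised(&mm));
	mm.block_size = 4096;	/* what a real init would record */
	printf("after init:  %d\n", mm_initialised(&mm));
	return 0;
}
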
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index 486f1a9217fd..5982935ee23a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -70,7 +70,8 @@ nv_pclass(struct nouveau_object *parent, u32 oclass)
 }
 
 struct nouveau_omthds {
-	u32 method;
+	u32 start;
+	u32 limit;
 	int (*call)(struct nouveau_object *, u32, void *, u32);
 };
 
@@ -81,12 +82,12 @@ struct nouveau_ofuncs {
 	void (*dtor)(struct nouveau_object *);
 	int  (*init)(struct nouveau_object *);
 	int  (*fini)(struct nouveau_object *, bool suspend);
-	u8   (*rd08)(struct nouveau_object *, u32 offset);
-	u16  (*rd16)(struct nouveau_object *, u32 offset);
-	u32  (*rd32)(struct nouveau_object *, u32 offset);
-	void (*wr08)(struct nouveau_object *, u32 offset, u8 data);
-	void (*wr16)(struct nouveau_object *, u32 offset, u16 data);
-	void (*wr32)(struct nouveau_object *, u32 offset, u32 data);
+	u8   (*rd08)(struct nouveau_object *, u64 offset);
+	u16  (*rd16)(struct nouveau_object *, u64 offset);
+	u32  (*rd32)(struct nouveau_object *, u64 offset);
+	void (*wr08)(struct nouveau_object *, u64 offset, u8 data);
+	void (*wr16)(struct nouveau_object *, u64 offset, u16 data);
+	void (*wr32)(struct nouveau_object *, u64 offset, u32 data);
 };
 
 static inline struct nouveau_ofuncs *
@@ -109,21 +110,27 @@ int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle);
 void nouveau_object_debug(void);
 
 static inline int
-nv_call(void *obj, u32 mthd, u32 data)
+nv_exec(void *obj, u32 mthd, void *data, u32 size)
 {
 	struct nouveau_omthds *method = nv_oclass(obj)->omthds;
 
 	while (method && method->call) {
-		if (method->method == mthd)
-			return method->call(obj, mthd, &data, sizeof(data));
+		if (mthd >= method->start && mthd <= method->limit)
+			return method->call(obj, mthd, data, size);
 		method++;
 	}
 
 	return -EINVAL;
 }
 
+static inline int
+nv_call(void *obj, u32 mthd, u32 data)
+{
+	return nv_exec(obj, mthd, &data, sizeof(data));
+}
+
 static inline u8
-nv_ro08(void *obj, u32 addr)
+nv_ro08(void *obj, u64 addr)
 {
 	u8 data = nv_ofuncs(obj)->rd08(obj, addr);
 	nv_spam(obj, "nv_ro08 0x%08x 0x%02x\n", addr, data);
@@ -131,7 +138,7 @@ nv_ro08(void *obj, u32 addr)
 }
 
 static inline u16
-nv_ro16(void *obj, u32 addr)
+nv_ro16(void *obj, u64 addr)
 {
 	u16 data = nv_ofuncs(obj)->rd16(obj, addr);
 	nv_spam(obj, "nv_ro16 0x%08x 0x%04x\n", addr, data);
@@ -139,7 +146,7 @@ nv_ro16(void *obj, u32 addr)
 }
 
 static inline u32
-nv_ro32(void *obj, u32 addr)
+nv_ro32(void *obj, u64 addr)
 {
 	u32 data = nv_ofuncs(obj)->rd32(obj, addr);
 	nv_spam(obj, "nv_ro32 0x%08x 0x%08x\n", addr, data);
@@ -147,28 +154,28 @@ nv_ro32(void *obj, u32 addr)
 }
 
 static inline void
-nv_wo08(void *obj, u32 addr, u8 data)
+nv_wo08(void *obj, u64 addr, u8 data)
 {
 	nv_spam(obj, "nv_wo08 0x%08x 0x%02x\n", addr, data);
 	nv_ofuncs(obj)->wr08(obj, addr, data);
 }
 
 static inline void
-nv_wo16(void *obj, u32 addr, u16 data)
+nv_wo16(void *obj, u64 addr, u16 data)
 {
 	nv_spam(obj, "nv_wo16 0x%08x 0x%04x\n", addr, data);
 	nv_ofuncs(obj)->wr16(obj, addr, data);
 }
 
 static inline void
-nv_wo32(void *obj, u32 addr, u32 data)
+nv_wo32(void *obj, u64 addr, u32 data)
 {
 	nv_spam(obj, "nv_wo32 0x%08x 0x%08x\n", addr, data);
 	nv_ofuncs(obj)->wr32(obj, addr, data);
 }
 
 static inline u32
-nv_mo32(void *obj, u32 addr, u32 mask, u32 data)
+nv_mo32(void *obj, u64 addr, u32 mask, u32 data)
 {
 	u32 temp = nv_ro32(obj, addr);
 	nv_wo32(obj, addr, (temp & ~mask) | data);
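
This is the change the earlier omthds table updates feed: nv_exec() now matches a handler by [start, limit] range instead of a single method, so one entry can claim a whole block of methods, and the old single-method tables simply repeat the value in both fields. A user-space sketch of the dispatch (names modelled on nouveau's, simplified):

#include <stdio.h>
#include <errno.h>

struct omthds {
	unsigned int start;
	unsigned int limit;
	int (*call)(unsigned int mthd, void *data, unsigned int size);
};

static int handle_vblsem(unsigned int mthd, void *data, unsigned int size)
{
	printf("vblsem method 0x%04x\n", mthd);
	return 0;
}

static const struct omthds omthds[] = {
	{ 0x0400, 0x040c, handle_vblsem },	/* one entry, four methods */
	{}
};

/* First entry whose [start, limit] range covers mthd wins. */
static int exec(const struct omthds *m, unsigned int mthd,
		void *data, unsigned int size)
{
	for (; m->call; m++) {
		if (mthd >= m->start && mthd <= m->limit)
			return m->call(mthd, data, size);
	}
	return -EINVAL;
}

int main(void)
{
	exec(omthds, 0x0404, NULL, 0);		/* hits the range entry */
	return exec(omthds, 0x0500, NULL, 0) == -EINVAL ? 0 : 1;
}
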
diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h
index 3c2e940eb0f8..31cd852c96df 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/parent.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h
@@ -14,7 +14,7 @@ struct nouveau_parent {
 	struct nouveau_object base;
 
 	struct nouveau_sclass *sclass;
-	u32 engine;
+	u64 engine;
 
 	int  (*context_attach)(struct nouveau_object *,
 			       struct nouveau_object *);
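
Widening parent->engine to u64 lines up with the device-class flags earlier in the series: NV_DEVICE_DISABLE_VENC sits at bit 42, past what a u32 mask can hold. Whether that flag is the direct motivation is an assumption; the truncation hazard itself is easy to show:

#include <stdio.h>
#include <stdint.h>

#define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL

int main(void)
{
	uint64_t engine64 = NV_DEVICE_DISABLE_VENC;
	uint32_t engine32 = (uint32_t)NV_DEVICE_DISABLE_VENC; /* truncates */

	printf("u64 mask: 0x%016llx\n", (unsigned long long)engine64);
	printf("u32 mask: 0x%08x (bit 42 lost)\n", engine32);
	return 0;
}
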
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
index 75d1ed5f85fd..13ccdf54dfad 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
@@ -1,45 +1,8 @@
 #ifndef __NOUVEAU_BSP_H__
 #define __NOUVEAU_BSP_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_bsp_chan {
-	struct nouveau_engctx base;
-};
-
-#define nouveau_bsp_context_create(p,e,c,g,s,a,f,d)                            \
-	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_bsp_context_destroy(d)                                         \
-	nouveau_engctx_destroy(&(d)->base)
-#define nouveau_bsp_context_init(d)                                            \
-	nouveau_engctx_init(&(d)->base)
-#define nouveau_bsp_context_fini(d,s)                                          \
-	nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_bsp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_bsp_context_init _nouveau_engctx_init
-#define _nouveau_bsp_context_fini _nouveau_engctx_fini
-#define _nouveau_bsp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_bsp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_bsp {
-	struct nouveau_engine base;
-};
-
-#define nouveau_bsp_create(p,e,c,d)                                            \
-	nouveau_engine_create((p), (e), (c), true, "PBSP", "bsp", (d))
-#define nouveau_bsp_destroy(d)                                                 \
-	nouveau_engine_destroy(&(d)->base)
-#define nouveau_bsp_init(d)                                                    \
-	nouveau_engine_init(&(d)->base)
-#define nouveau_bsp_fini(d,s)                                                  \
-	nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_bsp_dtor _nouveau_engine_dtor
-#define _nouveau_bsp_init _nouveau_engine_init
-#define _nouveau_bsp_fini _nouveau_engine_fini
-
 extern struct nouveau_oclass nv84_bsp_oclass;
+extern struct nouveau_oclass nvc0_bsp_oclass;
+extern struct nouveau_oclass nve0_bsp_oclass;
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/copy.h b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
index 70b9d8c5fcf5..8cad2cf28cef 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/copy.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
@@ -1,44 +1,7 @@
 #ifndef __NOUVEAU_COPY_H__
 #define __NOUVEAU_COPY_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_copy_chan {
-	struct nouveau_engctx base;
-};
-
-#define nouveau_copy_context_create(p,e,c,g,s,a,f,d)                           \
-	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_copy_context_destroy(d)                                        \
-	nouveau_engctx_destroy(&(d)->base)
-#define nouveau_copy_context_init(d)                                           \
-	nouveau_engctx_init(&(d)->base)
-#define nouveau_copy_context_fini(d,s)                                         \
-	nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_copy_context_dtor _nouveau_engctx_dtor
-#define _nouveau_copy_context_init _nouveau_engctx_init
-#define _nouveau_copy_context_fini _nouveau_engctx_fini
-#define _nouveau_copy_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_copy_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_copy {
-	struct nouveau_engine base;
-};
-
-#define nouveau_copy_create(p,e,c,y,i,d)                                       \
-	nouveau_engine_create((p), (e), (c), (y), "PCE"#i, "copy"#i, (d))
-#define nouveau_copy_destroy(d)                                                \
-	nouveau_engine_destroy(&(d)->base)
-#define nouveau_copy_init(d)                                                   \
-	nouveau_engine_init(&(d)->base)
-#define nouveau_copy_fini(d,s)                                                 \
-	nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_copy_dtor _nouveau_engine_dtor
-#define _nouveau_copy_init _nouveau_engine_init
-#define _nouveau_copy_fini _nouveau_engine_fini
+void nva3_copy_intr(struct nouveau_subdev *);
 
 extern struct nouveau_oclass nva3_copy_oclass;
 extern struct nouveau_oclass nvc0_copy0_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
index e3674743baaa..db975618e937 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
@@ -1,45 +1,6 @@
 #ifndef __NOUVEAU_CRYPT_H__
 #define __NOUVEAU_CRYPT_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_crypt_chan {
-	struct nouveau_engctx base;
-};
-
-#define nouveau_crypt_context_create(p,e,c,g,s,a,f,d)                          \
-	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_crypt_context_destroy(d)                                       \
-	nouveau_engctx_destroy(&(d)->base)
-#define nouveau_crypt_context_init(d)                                          \
-	nouveau_engctx_init(&(d)->base)
-#define nouveau_crypt_context_fini(d,s)                                        \
-	nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_crypt_context_dtor _nouveau_engctx_dtor
-#define _nouveau_crypt_context_init _nouveau_engctx_init
-#define _nouveau_crypt_context_fini _nouveau_engctx_fini
-#define _nouveau_crypt_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_crypt_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_crypt {
-	struct nouveau_engine base;
-};
-
-#define nouveau_crypt_create(p,e,c,d)                                          \
-	nouveau_engine_create((p), (e), (c), true, "PCRYPT", "crypt", (d))
-#define nouveau_crypt_destroy(d)                                               \
-	nouveau_engine_destroy(&(d)->base)
-#define nouveau_crypt_init(d)                                                  \
-	nouveau_engine_init(&(d)->base)
-#define nouveau_crypt_fini(d,s)                                                \
-	nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_crypt_dtor _nouveau_engine_dtor
-#define _nouveau_crypt_init _nouveau_engine_init
-#define _nouveau_crypt_fini _nouveau_engine_fini
-
 extern struct nouveau_oclass nv84_crypt_oclass;
 extern struct nouveau_oclass nv98_crypt_oclass;
 
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index 38ec1252cbaa..46948285f3e7 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -39,6 +39,11 @@ nouveau_disp(void *obj)
 
 extern struct nouveau_oclass nv04_disp_oclass;
 extern struct nouveau_oclass nv50_disp_oclass;
+extern struct nouveau_oclass nv84_disp_oclass;
+extern struct nouveau_oclass nva0_disp_oclass;
+extern struct nouveau_oclass nv94_disp_oclass;
+extern struct nouveau_oclass nva3_disp_oclass;
 extern struct nouveau_oclass nvd0_disp_oclass;
+extern struct nouveau_oclass nve0_disp_oclass;
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
index 700ccbb1941f..b28914ed1752 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
@@ -12,29 +12,17 @@ struct nouveau_dmaobj {
 	u32 access;
 	u64 start;
 	u64 limit;
+	u32 conf0;
 };
 
-#define nouveau_dmaobj_create(p,e,c,a,s,d)                                     \
-	nouveau_dmaobj_create_((p), (e), (c), (a), (s), sizeof(**d), (void **)d)
-#define nouveau_dmaobj_destroy(p)                                              \
-	nouveau_object_destroy(&(p)->base)
-#define nouveau_dmaobj_init(p)                                                 \
-	nouveau_object_init(&(p)->base)
-#define nouveau_dmaobj_fini(p,s)                                               \
-	nouveau_object_fini(&(p)->base, (s))
-
-int nouveau_dmaobj_create_(struct nouveau_object *, struct nouveau_object *,
-			   struct nouveau_oclass *, void *data, u32 size,
-			   int length, void **);
-
-#define _nouveau_dmaobj_dtor nouveau_object_destroy
-#define _nouveau_dmaobj_init nouveau_object_init
-#define _nouveau_dmaobj_fini nouveau_object_fini
-
 struct nouveau_dmaeng {
 	struct nouveau_engine base;
-	int (*bind)(struct nouveau_dmaeng *, struct nouveau_object *parent,
-		    struct nouveau_dmaobj *, struct nouveau_gpuobj **);
+
+	/* creates a "physical" dma object from a struct nouveau_dmaobj */
+	int (*bind)(struct nouveau_dmaeng *dmaeng,
+		    struct nouveau_object *parent,
+		    struct nouveau_dmaobj *dmaobj,
+		    struct nouveau_gpuobj **);
 };
 
 #define nouveau_dmaeng_create(p,e,c,d)                                         \
@@ -53,5 +41,8 @@ struct nouveau_dmaeng {
 extern struct nouveau_oclass nv04_dmaeng_oclass;
 extern struct nouveau_oclass nv50_dmaeng_oclass;
 extern struct nouveau_oclass nvc0_dmaeng_oclass;
+extern struct nouveau_oclass nvd0_dmaeng_oclass;
+
+extern struct nouveau_oclass nouveau_dmaobj_sclass[];
 
 #endif
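
The comment above bind() is the only documentation the reworked hook gets, so a minimal
usage sketch may help. Only the bind() signature comes from this header; the wrapper
name and error handling are illustrative assumptions, not part of the patch:

    static int
    example_bind(struct nouveau_dmaeng *dmaeng, struct nouveau_object *parent,
                 struct nouveau_dmaobj *dmaobj)
    {
            struct nouveau_gpuobj *gpuobj = NULL;
            int ret;

            /* ask the per-chipset dmaeng to instantiate a hardware
             * ("physical") dma object covering the [start, limit]
             * window described by dmaobj
             */
            ret = dmaeng->bind(dmaeng, parent, dmaobj, &gpuobj);
            if (ret)
                    return ret;
            /* ... reference gpuobj from the channel as required ... */
            return 0;
    }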
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index d67fed1e3970..f18846c8c6fe 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -33,15 +33,15 @@ int  nouveau_fifo_channel_create_(struct nouveau_object *,
 				  struct nouveau_object *,
 				  struct nouveau_oclass *,
 				  int bar, u32 addr, u32 size, u32 push,
-				  u32 engmask, int len, void **);
+				  u64 engmask, int len, void **);
 void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
 
 #define _nouveau_fifo_channel_init _nouveau_namedb_init
 #define _nouveau_fifo_channel_fini _nouveau_namedb_fini
 
 void _nouveau_fifo_channel_dtor(struct nouveau_object *);
-u32  _nouveau_fifo_channel_rd32(struct nouveau_object *, u32);
-void _nouveau_fifo_channel_wr32(struct nouveau_object *, u32, u32);
+u32  _nouveau_fifo_channel_rd32(struct nouveau_object *, u64);
+void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32);
 
 struct nouveau_fifo_base {
 	struct nouveau_gpuobj base;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
index 74d554fb3281..0a66781e8cf1 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
@@ -1,45 +1,7 @@
 #ifndef __NOUVEAU_PPP_H__
 #define __NOUVEAU_PPP_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_ppp_chan {
-	struct nouveau_engctx base;
-};
-
-#define nouveau_ppp_context_create(p,e,c,g,s,a,f,d)                            \
-	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_ppp_context_destroy(d)                                         \
-	nouveau_engctx_destroy(&(d)->base)
-#define nouveau_ppp_context_init(d)                                            \
-	nouveau_engctx_init(&(d)->base)
-#define nouveau_ppp_context_fini(d,s)                                          \
-	nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_ppp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_ppp_context_init _nouveau_engctx_init
-#define _nouveau_ppp_context_fini _nouveau_engctx_fini
-#define _nouveau_ppp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_ppp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_ppp {
-	struct nouveau_engine base;
-};
-
-#define nouveau_ppp_create(p,e,c,d)                                            \
-	nouveau_engine_create((p), (e), (c), true, "PPPP", "ppp", (d))
-#define nouveau_ppp_destroy(d)                                                 \
-	nouveau_engine_destroy(&(d)->base)
-#define nouveau_ppp_init(d)                                                    \
-	nouveau_engine_init(&(d)->base)
-#define nouveau_ppp_fini(d,s)                                                  \
-	nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_ppp_dtor _nouveau_engine_dtor
-#define _nouveau_ppp_init _nouveau_engine_init
-#define _nouveau_ppp_fini _nouveau_engine_fini
-
 extern struct nouveau_oclass nv98_ppp_oclass;
+extern struct nouveau_oclass nvc0_ppp_oclass;
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/vp.h b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
index 05cd08fba377..d7b287b115bf 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/vp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
@@ -1,45 +1,8 @@
 #ifndef __NOUVEAU_VP_H__
 #define __NOUVEAU_VP_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_vp_chan {
-	struct nouveau_engctx base;
-};
-
-#define nouveau_vp_context_create(p,e,c,g,s,a,f,d)                             \
-	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_vp_context_destroy(d)                                          \
-	nouveau_engctx_destroy(&(d)->base)
-#define nouveau_vp_context_init(d)                                             \
-	nouveau_engctx_init(&(d)->base)
-#define nouveau_vp_context_fini(d,s)                                           \
-	nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_vp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_vp_context_init _nouveau_engctx_init
-#define _nouveau_vp_context_fini _nouveau_engctx_fini
-#define _nouveau_vp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_vp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_vp {
-	struct nouveau_engine base;
-};
-
-#define nouveau_vp_create(p,e,c,d)                                             \
-	nouveau_engine_create((p), (e), (c), true, "PVP", "vp", (d))
-#define nouveau_vp_destroy(d)                                                  \
-	nouveau_engine_destroy(&(d)->base)
-#define nouveau_vp_init(d)                                                     \
-	nouveau_engine_init(&(d)->base)
-#define nouveau_vp_fini(d,s)                                                   \
-	nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_vp_dtor _nouveau_engine_dtor
-#define _nouveau_vp_init _nouveau_engine_init
-#define _nouveau_vp_fini _nouveau_engine_fini
-
 extern struct nouveau_oclass nv84_vp_oclass;
+extern struct nouveau_oclass nvc0_vp_oclass;
+extern struct nouveau_oclass nve0_vp_oclass;
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
index d682fb625833..b79025da581e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
@@ -23,6 +23,7 @@ struct dcb_output {
 	uint8_t bus;
 	uint8_t location;
 	uint8_t or;
+	uint8_t link;
 	bool duallink_possible;
 	union {
 		struct sor_conf {
@@ -55,36 +56,11 @@ struct dcb_output {
 
 u16 dcb_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len);
 u16 dcb_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
+u16 dcb_outp_parse(struct nouveau_bios *, u8 idx, u8 *, u8 *,
+		   struct dcb_output *);
+u16 dcb_outp_match(struct nouveau_bios *, u16 type, u16 mask, u8 *, u8 *,
+		   struct dcb_output *);
 int dcb_outp_foreach(struct nouveau_bios *, void *data, int (*exec)
 		     (struct nouveau_bios *, void *, int index, u16 entry));
 
-
-/* BIT 'U'/'d' table encoder subtables have hashes matching them to
- * a particular set of encoders.
- *
- * This function returns true if a particular DCB entry matches.
- */
-static inline bool
-dcb_hash_match(struct dcb_output *dcb, u32 hash)
-{
-	if ((hash & 0x000000f0) != (dcb->location << 4))
-		return false;
-	if ((hash & 0x0000000f) != dcb->type)
-		return false;
-	if (!(hash & (dcb->or << 16)))
-		return false;
-
-	switch (dcb->type) {
-	case DCB_OUTPUT_TMDS:
-	case DCB_OUTPUT_LVDS:
-	case DCB_OUTPUT_DP:
-		if (hash & 0x00c00000) {
-			if (!(hash & (dcb->sorconf.link << 22)))
-				return false;
-		}
-	default:
-		return true;
-	}
-}
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
new file mode 100644
index 000000000000..c35937e2f6a4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
@@ -0,0 +1,48 @@
+#ifndef __NVBIOS_DISP_H__
+#define __NVBIOS_DISP_H__
+
+u16 nvbios_disp_table(struct nouveau_bios *,
+		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub);
+
+struct nvbios_disp {
+	u16 data;
+};
+
+u16 nvbios_disp_entry(struct nouveau_bios *, u8 idx,
+		      u8 *ver, u8 *hdr__, u8 *sub);
+u16 nvbios_disp_parse(struct nouveau_bios *, u8 idx,
+		      u8 *ver, u8 *hdr__, u8 *sub,
+		      struct nvbios_disp *);
+
+struct nvbios_outp {
+	u16 type;
+	u16 mask;
+	u16 script[3];
+};
+
+u16 nvbios_outp_entry(struct nouveau_bios *, u8 idx,
+		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_outp_parse(struct nouveau_bios *, u8 idx,
+		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		      struct nvbios_outp *);
+u16 nvbios_outp_match(struct nouveau_bios *, u16 type, u16 mask,
+		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		      struct nvbios_outp *);
+
+
+struct nvbios_ocfg {
+	u16 match;
+	u16 clkcmp[2];
+};
+
+u16 nvbios_ocfg_entry(struct nouveau_bios *, u16 outp, u8 idx,
+		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_ocfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
+		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		      struct nvbios_ocfg *);
+u16 nvbios_ocfg_match(struct nouveau_bios *, u16 outp, u16 type,
+		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		      struct nvbios_ocfg *);
+u16 nvbios_oclk_match(struct nouveau_bios *, u16 cmp, u32 khz);
+
+#endif
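
The new header follows the usual nvbios table/entry/parse/match layering. A hedged
sketch of how a caller might look up the output scripts for an encoder; the mask
encoding shown, (heads << 8) | (link << 6) | or, mirrors dcb_outp_hashm() introduced
later in this series, and the concrete values are illustrative:

    struct nvbios_outp info;
    u8  ver, hdr, cnt, len;
    u16 data;

    /* find the 'U' table output entry for a TMDS encoder on head 0, OR 0 */
    data = nvbios_outp_match(bios, DCB_OUTPUT_TMDS, (1 << 8) | (1 << 0),
                             &ver, &hdr, &cnt, &len, &info);
    if (data) {
            /* info.script[0..2] hold the scripts to execute for this output */
    }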
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
index 73b5e5d3e75a..6e54218b55fc 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
@@ -1,8 +1,34 @@
 #ifndef __NVBIOS_DP_H__
 #define __NVBIOS_DP_H__
 
-u16 dp_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 dp_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
-u16 dp_outp_match(struct nouveau_bios *, struct dcb_output *, u8 *ver, u8 *len);
+struct nvbios_dpout {
+	u16 type;
+	u16 mask;
+	u8  flags;
+	u32 script[5];
+	u32 lnkcmp;
+};
+
+u16 nvbios_dpout_parse(struct nouveau_bios *, u8 idx,
+		       u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		       struct nvbios_dpout *);
+u16 nvbios_dpout_match(struct nouveau_bios *, u16 type, u16 mask,
+		       u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		       struct nvbios_dpout *);
+
+struct nvbios_dpcfg {
+	u8 drv;
+	u8 pre;
+	u8 unk;
+};
+
+u16
+nvbios_dpcfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		   struct nvbios_dpcfg *);
+u16
+nvbios_dpcfg_match(struct nouveau_bios *, u16 outp, u8 un, u8 vs, u8 pe,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		   struct nvbios_dpcfg *);
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 5c1b5e1904f9..da470e6851b1 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -69,8 +69,11 @@ struct nouveau_fb {
 		} type;
 		u64 stolen;
 		u64 size;
-		int ranks;
 
+		int ranks;
+		int parts;
+
+		int  (*init)(struct nouveau_fb *);
 		int  (*get)(struct nouveau_fb *, u64 size, u32 align,
 			    u32 size_nc, u32 type, struct nouveau_mem **);
 		void (*put)(struct nouveau_fb *, struct nouveau_mem **);
@@ -84,6 +87,8 @@ struct nouveau_fb {
 		int regions;
 		void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
 			     u32 pitch, u32 flags, struct nouveau_fb_tile *);
+		void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags,
+			     struct nouveau_fb_tile *);
 		void (*fini)(struct nouveau_fb *, int i,
 			     struct nouveau_fb_tile *);
 		void (*prog)(struct nouveau_fb *, int i,
@@ -99,7 +104,7 @@ nouveau_fb(void *obj)
 
 #define nouveau_fb_create(p,e,c,d)                                             \
 	nouveau_subdev_create((p), (e), (c), 0, "PFB", "fb", (d))
-int  nouveau_fb_created(struct nouveau_fb *);
+int  nouveau_fb_preinit(struct nouveau_fb *);
 void nouveau_fb_destroy(struct nouveau_fb *);
 int  nouveau_fb_init(struct nouveau_fb *);
 #define nouveau_fb_fini(p,s)                                                   \
@@ -111,9 +116,19 @@ int  _nouveau_fb_init(struct nouveau_object *);
 
 extern struct nouveau_oclass nv04_fb_oclass;
 extern struct nouveau_oclass nv10_fb_oclass;
+extern struct nouveau_oclass nv1a_fb_oclass;
 extern struct nouveau_oclass nv20_fb_oclass;
+extern struct nouveau_oclass nv25_fb_oclass;
 extern struct nouveau_oclass nv30_fb_oclass;
+extern struct nouveau_oclass nv35_fb_oclass;
+extern struct nouveau_oclass nv36_fb_oclass;
 extern struct nouveau_oclass nv40_fb_oclass;
+extern struct nouveau_oclass nv41_fb_oclass;
+extern struct nouveau_oclass nv44_fb_oclass;
+extern struct nouveau_oclass nv46_fb_oclass;
+extern struct nouveau_oclass nv47_fb_oclass;
+extern struct nouveau_oclass nv49_fb_oclass;
+extern struct nouveau_oclass nv4e_fb_oclass;
 extern struct nouveau_oclass nv50_fb_oclass;
 extern struct nouveau_oclass nvc0_fb_oclass;
 
@@ -122,13 +137,35 @@ int  nouveau_fb_bios_memtype(struct nouveau_bios *);
 
 bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
 
+void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
 void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
 
+int  nv20_fb_vram_init(struct nouveau_fb *);
+void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int  nv30_fb_init(struct nouveau_object *);
 void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
 		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv30_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+
+void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
+		       struct nouveau_fb_tile *);
+
+int  nv41_fb_vram_init(struct nouveau_fb *);
+int  nv41_fb_init(struct nouveau_object *);
+void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int  nv44_fb_vram_init(struct nouveau_fb *);
+int  nv44_fb_init(struct nouveau_object *);
+void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
 
 void nv50_fb_vram_del(struct nouveau_fb *, struct nouveau_mem **);
-void nv50_fb_trap(struct nouveau_fb *, int display);
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
index cd01c533007a..d70ba342aa2e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -65,14 +65,14 @@ nouveau_barobj_dtor(struct nouveau_object *object)
 }
 
 static u32
-nouveau_barobj_rd32(struct nouveau_object *object, u32 addr)
+nouveau_barobj_rd32(struct nouveau_object *object, u64 addr)
 {
 	struct nouveau_barobj *barobj = (void *)object;
 	return ioread32_native(barobj->iomem + addr);
 }
 
 static void
-nouveau_barobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_barobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
 	struct nouveau_barobj *barobj = (void *)object;
 	iowrite32_native(data, barobj->iomem + addr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index 70ca7d5a1aa1..dd111947eb86 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -63,7 +63,7 @@ nouveau_bios_shadow_of(struct nouveau_bios *bios)
 	struct pci_dev *pdev = nv_device(bios)->pdev;
 	struct device_node *dn;
 	const u32 *data;
-	int size, i;
+	int size;
 
 	dn = pci_device_to_OF_node(pdev);
 	if (!dn) {
@@ -210,11 +210,19 @@ nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
 		return;
 
 	bios->data = kmalloc(bios->size, GFP_KERNEL);
-	for (i = 0; bios->data && i < bios->size; i += cnt) {
-		cnt = min((bios->size - i), (u32)4096);
-		ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
-		if (ret != cnt)
-			break;
+	if (bios->data) {
+		/* disobey the acpi spec - much faster on at least w530 ... */
+		ret = nouveau_acpi_get_bios_chunk(bios->data, 0, bios->size);
+		if (ret != bios->size ||
+		    nvbios_checksum(bios->data, bios->size)) {
+			/* ... that didn't work, ok, i'll be good now */
+			for (i = 0; i < bios->size; i += cnt) {
+				cnt = min((bios->size - i), (u32)4096);
+				ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
+				if (ret != cnt)
+					break;
+			}
+		}
 	}
 }
 
@@ -358,42 +366,42 @@ nouveau_bios_shadow(struct nouveau_bios *bios)
 }
 
 static u8
-nouveau_bios_rd08(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd08(struct nouveau_object *object, u64 addr)
 {
 	struct nouveau_bios *bios = (void *)object;
 	return bios->data[addr];
 }
 
 static u16
-nouveau_bios_rd16(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd16(struct nouveau_object *object, u64 addr)
 {
 	struct nouveau_bios *bios = (void *)object;
 	return get_unaligned_le16(&bios->data[addr]);
 }
 
 static u32
-nouveau_bios_rd32(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd32(struct nouveau_object *object, u64 addr)
 {
 	struct nouveau_bios *bios = (void *)object;
 	return get_unaligned_le32(&bios->data[addr]);
 }
 
 static void
-nouveau_bios_wr08(struct nouveau_object *object, u32 addr, u8 data)
+nouveau_bios_wr08(struct nouveau_object *object, u64 addr, u8 data)
 {
 	struct nouveau_bios *bios = (void *)object;
 	bios->data[addr] = data;
 }
 
 static void
-nouveau_bios_wr16(struct nouveau_object *object, u32 addr, u16 data)
+nouveau_bios_wr16(struct nouveau_object *object, u64 addr, u16 data)
 {
 	struct nouveau_bios *bios = (void *)object;
 	put_unaligned_le16(data, &bios->data[addr]);
 }
 
 static void
-nouveau_bios_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_bios_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
 	struct nouveau_bios *bios = (void *)object;
 	put_unaligned_le32(data, &bios->data[addr]);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index c51197157749..0fd87df99dd6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -107,6 +107,69 @@ dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
 	return 0x0000;
 }
 
+u16
+dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
+	       struct dcb_output *outp)
+{
+	u16 dcb = dcb_outp(bios, idx, ver, len);
+	if (dcb) {
+		if (*ver >= 0x20) {
+			u32 conn = nv_ro32(bios, dcb + 0x00);
+			outp->or        = (conn & 0x0f000000) >> 24;
+			outp->location  = (conn & 0x00300000) >> 20;
+			outp->bus       = (conn & 0x000f0000) >> 16;
+			outp->connector = (conn & 0x0000f000) >> 12;
+			outp->heads     = (conn & 0x00000f00) >> 8;
+			outp->i2c_index = (conn & 0x000000f0) >> 4;
+			outp->type      = (conn & 0x0000000f);
+			outp->link      = 0;
+		} else {
+			dcb = 0x0000;
+		}
+
+		if (*ver >= 0x40) {
+			u32 conf = nv_ro32(bios, dcb + 0x04);
+			switch (outp->type) {
+			case DCB_OUTPUT_TMDS:
+			case DCB_OUTPUT_LVDS:
+			case DCB_OUTPUT_DP:
+				outp->link = (conf & 0x00000030) >> 4;
+				outp->sorconf.link = outp->link; /*XXX*/
+				break;
+			default:
+				break;
+			}
+		}
+	}
+	return dcb;
+}
+
+static inline u16
+dcb_outp_hasht(struct dcb_output *outp)
+{
+	return outp->type;
+}
+
+static inline u16
+dcb_outp_hashm(struct dcb_output *outp)
+{
+	return (outp->heads << 8) | (outp->link << 6) | outp->or;
+}
+
+u16
+dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
+	       u8 *ver, u8 *len, struct dcb_output *outp)
+{
+	u16 dcb, idx = 0;
+	while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) {
+		if (dcb_outp_hasht(outp) == type) {
+			if ((dcb_outp_hashm(outp) & mask) == mask)
+				break;
+		}
+	}
+	return dcb;
+}
+
 int
 dcb_outp_foreach(struct nouveau_bios *bios, void *data,
 		 int (*exec)(struct nouveau_bios *, void *, int, u16))
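
dcb_outp_hasht()/dcb_outp_hashm() reproduce the hash layout of the dcb_hash_match()
helper removed from dcb.h earlier in this patch, and dcb_outp_match() walks every DCB
entry comparing against them. A hedged usage sketch, with illustrative values:

    struct dcb_output outp;
    u8  ver, len;
    u16 dcb;

    /* locate the DP encoder entry driving head 0 via OR 1, first sublink */
    dcb = dcb_outp_match(bios, DCB_OUTPUT_DP,
                         (1 << 8) | (1 << 6) | (1 << 0),
                         &ver, &len, &outp);
    if (!dcb) {
            /* no such encoder described by the VBIOS */
    }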
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
new file mode 100644
index 000000000000..7f16e52d9bea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/disp.h>
+
+u16
+nvbios_disp_table(struct nouveau_bios *bios,
+		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub)
+{
+	struct bit_entry U;
+
+	if (!bit_entry(bios, 'U', &U)) {
+		if (U.version == 1) {
+			u16 data = nv_ro16(bios, U.offset);
+			if (data) {
+				*ver = nv_ro08(bios, data + 0x00);
+				switch (*ver) {
+				case 0x20:
+				case 0x21:
+					*hdr = nv_ro08(bios, data + 0x01);
+					*len = nv_ro08(bios, data + 0x02);
+					*cnt = nv_ro08(bios, data + 0x03);
+					*sub = nv_ro08(bios, data + 0x04);
+					return data;
+				default:
+					break;
+				}
+			}
+		}
+	}
+
+	return 0x0000;
+}
+
+u16
+nvbios_disp_entry(struct nouveau_bios *bios, u8 idx,
+		  u8 *ver, u8 *len, u8 *sub)
+{
+	u8  hdr, cnt;
+	u16 data = nvbios_disp_table(bios, ver, &hdr, &cnt, len, sub);
+	if (data && idx < cnt)
+		return data + hdr + (idx * *len);
+	*ver = 0x00;
+	return 0x0000;
+}
+
+u16
+nvbios_disp_parse(struct nouveau_bios *bios, u8 idx,
+		  u8 *ver, u8 *len, u8 *sub,
+		  struct nvbios_disp *info)
+{
+	u16 data = nvbios_disp_entry(bios, idx, ver, len, sub);
+	if (data && *len >= 2) {
+		info->data = nv_ro16(bios, data + 0);
+		return data;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_outp_entry(struct nouveau_bios *bios, u8 idx,
+		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	struct nvbios_disp info;
+	u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info);
+	if (data) {
+		*cnt = nv_ro08(bios, info.data + 0x05);
+		*len = 0x06;
+		data = info.data;
+	}
+	return data;
+}
+
+u16
+nvbios_outp_parse(struct nouveau_bios *bios, u8 idx,
+		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		  struct nvbios_outp *info)
+{
+	u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len);
+	if (data && *hdr >= 0x0a) {
+		info->type      = nv_ro16(bios, data + 0x00);
+		info->mask      = nv_ro32(bios, data + 0x02);
+		if (*ver <= 0x20) /* match any link */
+			info->mask |= 0x00c0;
+		info->script[0] = nv_ro16(bios, data + 0x06);
+		info->script[1] = nv_ro16(bios, data + 0x08);
+		info->script[2] = 0x0000;
+		if (*hdr >= 0x0c)
+			info->script[2] = nv_ro16(bios, data + 0x0a);
+		return data;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
+		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		  struct nvbios_outp *info)
+{
+	u16 data, idx = 0;
+	while ((data = nvbios_outp_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
+		if (data && info->type == type) {
+			if ((info->mask & mask) == mask)
+				break;
+		}
+	}
+	return data;
+}
+
+u16
+nvbios_ocfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
+		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	if (idx < *cnt)
+		return outp + *hdr + (idx * *len);
+	return 0x0000;
+}
+
+u16
+nvbios_ocfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
+		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		  struct nvbios_ocfg *info)
+{
+	u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
+	if (data) {
+		info->match     = nv_ro16(bios, data + 0x00);
+		info->clkcmp[0] = nv_ro16(bios, data + 0x02);
+		info->clkcmp[1] = nv_ro16(bios, data + 0x04);
+	}
+	return data;
+}
+
+u16
+nvbios_ocfg_match(struct nouveau_bios *bios, u16 outp, u16 type,
+		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		  struct nvbios_ocfg *info)
+{
+	u16 data, idx = 0;
+	while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
+		if (info->match == type)
+			break;
+	}
+	return data;
+}
+
+u16
+nvbios_oclk_match(struct nouveau_bios *bios, u16 cmp, u32 khz)
+{
+	while (cmp) {
+		if (khz / 10 >= nv_ro16(bios, cmp + 0x00))
+			return  nv_ro16(bios, cmp + 0x02);
+		cmp += 0x04;
+	}
+	return 0x0000;
+}
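
nvbios_oclk_match() walks a list of (threshold, script) pairs, thresholds stored in
units of 10 kHz, and returns the script of the first entry the requested clock meets.
A hedged sketch combining it with nvbios_ocfg_match() above; the match type and clock
values are illustrative, and 'data' is assumed to be an output entry pointer from
nvbios_outp_match():

    struct nvbios_ocfg ocfg;
    u16 script;

    if (nvbios_ocfg_match(bios, data, 0x00c0 /* hypothetical match type */,
                          &ver, &hdr, &cnt, &len, &ocfg)) {
            /* pick the script appropriate for a 270 MHz pixel clock */
            script = nvbios_oclk_match(bios, ocfg.clkcmp[0], 270000);
    }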
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
index 3cbc0f3e8d5e..663853bcca82 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -25,23 +25,29 @@
 
 #include "subdev/bios.h"
 #include "subdev/bios/bit.h"
-#include "subdev/bios/dcb.h"
 #include "subdev/bios/dp.h"
 
-u16
-dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+static u16
+nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
-	struct bit_entry bit_d;
+	struct bit_entry d;
 
-	if (!bit_entry(bios, 'd', &bit_d)) {
-		if (bit_d.version == 1) {
-			u16 data = nv_ro16(bios, bit_d.offset);
+	if (!bit_entry(bios, 'd', &d)) {
+		if (d.version == 1 && d.length >= 2) {
+			u16 data = nv_ro16(bios, d.offset);
 			if (data) {
-				*ver = nv_ro08(bios, data + 0);
-				*hdr = nv_ro08(bios, data + 1);
-				*len = nv_ro08(bios, data + 2);
-				*cnt = nv_ro08(bios, data + 3);
-				return data;
+				*ver = nv_ro08(bios, data + 0x00);
+				switch (*ver) {
+				case 0x21:
+				case 0x30:
+				case 0x40:
+					*hdr = nv_ro08(bios, data + 0x01);
+					*len = nv_ro08(bios, data + 0x02);
+					*cnt = nv_ro08(bios, data + 0x03);
+					return data;
+				default:
+					break;
+				}
 			}
 		}
 	}
@@ -49,28 +55,150 @@ dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 	return 0x0000;
 }
 
-u16
-dp_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
+static u16
+nvbios_dpout_entry(struct nouveau_bios *bios, u8 idx,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
-	u8  hdr, cnt;
-	u16 table = dp_table(bios, ver, &hdr, &cnt, len);
-	if (table && idx < cnt)
-		return nv_ro16(bios, table + hdr + (idx * *len));
-	return 0xffff;
+	u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len);
+	if (data && idx < *cnt) {
+		u16 outp = nv_ro16(bios, data + *hdr + idx * *len);
+		switch (*ver * !!outp) {
+		case 0x21:
+		case 0x30:
+			*hdr = nv_ro08(bios, data + 0x04);
+			*len = nv_ro08(bios, data + 0x05);
+			*cnt = nv_ro08(bios, outp + 0x04);
+			break;
+		case 0x40:
+			*hdr = nv_ro08(bios, data + 0x04);
+			*cnt = 0;
+			*len = 0;
+			break;
+		default:
+			break;
+		}
+		return outp;
+	}
+	*ver = 0x00;
+	return 0x0000;
 }
 
 u16
-dp_outp_match(struct nouveau_bios *bios, struct dcb_output *outp,
-	      u8 *ver, u8 *len)
+nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		   struct nvbios_dpout *info)
 {
-	u8  idx = 0;
-	u16 data;
-	while ((data = dp_outp(bios, idx++, ver, len)) != 0xffff) {
-		if (data) {
-			u32 hash = nv_ro32(bios, data);
-			if (dcb_hash_match(outp, hash))
-				return data;
+	u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
+	if (data && *ver) {
+		info->type = nv_ro16(bios, data + 0x00);
+		info->mask = nv_ro16(bios, data + 0x02);
+		switch (*ver) {
+		case 0x21:
+		case 0x30:
+			info->flags     = nv_ro08(bios, data + 0x05);
+			info->script[0] = nv_ro16(bios, data + 0x06);
+			info->script[1] = nv_ro16(bios, data + 0x08);
+			info->lnkcmp    = nv_ro16(bios, data + 0x0a);
+			info->script[2] = nv_ro16(bios, data + 0x0c);
+			info->script[3] = nv_ro16(bios, data + 0x0e);
+			info->script[4] = nv_ro16(bios, data + 0x10);
+			break;
+		case 0x40:
+			info->flags     = nv_ro08(bios, data + 0x04);
+			info->script[0] = nv_ro16(bios, data + 0x05);
+			info->script[1] = nv_ro16(bios, data + 0x07);
+			info->lnkcmp    = nv_ro16(bios, data + 0x09);
+			info->script[2] = nv_ro16(bios, data + 0x0b);
+			info->script[3] = nv_ro16(bios, data + 0x0d);
+			info->script[4] = nv_ro16(bios, data + 0x0f);
+			break;
+		default:
+			data = 0x0000;
+			break;
 		}
 	}
+	return data;
+}
+
+u16
+nvbios_dpout_match(struct nouveau_bios *bios, u16 type, u16 mask,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		   struct nvbios_dpout *info)
+{
+	u16 data, idx = 0;
+	while ((data = nvbios_dpout_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
+		if (data && info->type == type) {
+			if ((info->mask & mask) == mask)
+				break;
+		}
+	}
+	return data;
+}
+
+static u16
+nvbios_dpcfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	if (*ver >= 0x40) {
+		outp = nvbios_dp_table(bios, ver, hdr, cnt, len);
+		*hdr = *hdr + (*len * *cnt);
+		*len = nv_ro08(bios, outp + 0x06);
+		*cnt = nv_ro08(bios, outp + 0x07);
+	}
+
+	if (idx < *cnt)
+		return outp + *hdr + (idx * *len);
+
 	return 0x0000;
 }
+
+u16
+nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		   struct nvbios_dpcfg *info)
+{
+	u16 data = nvbios_dpcfg_entry(bios, outp, idx, ver, hdr, cnt, len);
+	if (data) {
+		switch (*ver) {
+		case 0x21:
+			info->drv = nv_ro08(bios, data + 0x02);
+			info->pre = nv_ro08(bios, data + 0x03);
+			info->unk = nv_ro08(bios, data + 0x04);
+			break;
+		case 0x30:
+		case 0x40:
+			info->drv = nv_ro08(bios, data + 0x01);
+			info->pre = nv_ro08(bios, data + 0x02);
+			info->unk = nv_ro08(bios, data + 0x03);
+			break;
+		default:
+			data = 0x0000;
+			break;
+		}
+	}
+	return data;
+}
+
+u16
+nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 un, u8 vs, u8 pe,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		   struct nvbios_dpcfg *info)
+{
+	u8 idx = 0;
+	u16 data;
+
+	if (*ver >= 0x30) {
+		const u8 vsoff[] = { 0, 4, 7, 9 };
+		idx = (un * 10) + vsoff[vs] + pe;
+	} else {
+		while ((data = nvbios_dpcfg_entry(bios, outp, idx,
+						  ver, hdr, cnt, len))) {
+			if (nv_ro08(bios, data + 0x00) == vs &&
+			    nv_ro08(bios, data + 0x01) == pe)
+				break;
+			idx++;
+		}
+	}
+
+	return nvbios_dpcfg_parse(bios, outp, idx, ver, hdr, cnt, len, info);
+}
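
nvbios_dpcfg_match() resolves a (voltage swing, pre-emphasis) request to a
drive-parameter entry: DP tables 0x30 and later index directly via the vsoff[]
offsets, while older tables are scanned for matching vs/pe bytes. A hedged sketch
of a caller; 'data' is assumed to come from nvbios_dpout_match() above and the
levels are illustrative:

    struct nvbios_dpcfg cfg;

    /* fetch drive/pre-emphasis levels for swing 1, pre-emphasis 2 */
    if (nvbios_dpcfg_match(bios, data, 0, 1, 2,
                           &ver, &hdr, &cnt, &len, &cfg)) {
            /* program cfg.drv and cfg.pre into the link hardware */
    }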
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
index 4c9f1e508165..c90d4aa3ae4f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
@@ -101,8 +101,8 @@ dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
 	}
 
 	/* DCB 2.2, fixed TVDAC GPIO data */
-	if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len)) && ver >= 0x22) {
-		if (func == DCB_GPIO_TVDAC0) {
+	if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len))) {
+		if (ver >= 0x22 && ver < 0x30 && func == DCB_GPIO_TVDAC0) {
 			u8 conf = nv_ro08(bios, entry - 5);
 			u8 addr = nv_ro08(bios, entry - 4);
 			if (conf & 0x01) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 6be8c32f6e4c..ae168bbb86d8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -743,9 +743,10 @@ static void
 init_dp_condition(struct nvbios_init *init)
 {
 	struct nouveau_bios *bios = init->bios;
+	struct nvbios_dpout info;
 	u8  cond = nv_ro08(bios, init->offset + 1);
 	u8  unkn = nv_ro08(bios, init->offset + 2);
-	u8  ver, len;
+	u8  ver, hdr, cnt, len;
 	u16 data;
 
 	trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn);
@@ -759,10 +760,12 @@ init_dp_condition(struct nvbios_init *init)
 	case 1:
 	case 2:
 		if ( init->outp &&
-		    (data = dp_outp_match(bios, init->outp, &ver, &len))) {
-			if (ver <= 0x40 && !(nv_ro08(bios, data + 5) & cond))
-				init_exec_set(init, false);
-			if (ver == 0x40 && !(nv_ro08(bios, data + 4) & cond))
+		    (data = nvbios_dpout_match(bios, DCB_OUTPUT_DP,
+					       (init->outp->or << 0) |
+					       (init->outp->sorconf.link << 6),
+					       &ver, &hdr, &cnt, &len, &info)))
+		{
+			if (!(info.flags & cond))
 				init_exec_set(init, false);
 			break;
 		}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
index ca9a4648bd8a..f8a7ed4166cf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
@@ -25,7 +25,6 @@
 #include <core/object.h>
 #include <core/device.h>
 #include <core/client.h>
-#include <core/device.h>
 #include <core/option.h>
 
 #include <core/class.h>
@@ -61,19 +60,24 @@ struct nouveau_devobj {
 
 static const u64 disable_map[] = {
 	[NVDEV_SUBDEV_VBIOS]	= NV_DEVICE_DISABLE_VBIOS,
+	[NVDEV_SUBDEV_DEVINIT]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_GPIO]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_I2C]	= NV_DEVICE_DISABLE_CORE,
-	[NVDEV_SUBDEV_DEVINIT]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_CLOCK]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_MXM]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_MC]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_TIMER]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_FB]	= NV_DEVICE_DISABLE_CORE,
-	[NVDEV_SUBDEV_VM]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_LTCG]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_IBUS]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_INSTMEM]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_VM]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_BAR]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_VOLT]	= NV_DEVICE_DISABLE_CORE,
-	[NVDEV_SUBDEV_CLOCK]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_THERM]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_ENGINE_DMAOBJ]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_ENGINE_FIFO]	= NV_DEVICE_DISABLE_FIFO,
+	[NVDEV_ENGINE_SW]	= NV_DEVICE_DISABLE_FIFO,
 	[NVDEV_ENGINE_GR]	= NV_DEVICE_DISABLE_GRAPH,
 	[NVDEV_ENGINE_MPEG]	= NV_DEVICE_DISABLE_MPEG,
 	[NVDEV_ENGINE_ME]	= NV_DEVICE_DISABLE_ME,
@@ -84,7 +88,7 @@ static const u64 disable_map[] = {
 	[NVDEV_ENGINE_COPY0]	= NV_DEVICE_DISABLE_COPY0,
 	[NVDEV_ENGINE_COPY1]	= NV_DEVICE_DISABLE_COPY1,
 	[NVDEV_ENGINE_UNK1C1]	= NV_DEVICE_DISABLE_UNK1C1,
-	[NVDEV_ENGINE_FIFO]	= NV_DEVICE_DISABLE_FIFO,
+	[NVDEV_ENGINE_VENC]	= NV_DEVICE_DISABLE_VENC,
 	[NVDEV_ENGINE_DISP]	= NV_DEVICE_DISABLE_DISP,
 	[NVDEV_SUBDEV_NR]	= 0,
 };
@@ -208,7 +212,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
 
 		/* determine frequency of timing crystal */
 		if ( device->chipset < 0x17 ||
-		    (device->chipset >= 0x20 && device->chipset <= 0x25))
+		    (device->chipset >= 0x20 && device->chipset < 0x25))
 			strap &= 0x00000040;
 		else
 			strap &= 0x00400040;
@@ -356,37 +360,37 @@ fail:
 }
 
 static u8
-nouveau_devobj_rd08(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
 {
 	return nv_rd08(object->engine, addr);
 }
 
 static u16
-nouveau_devobj_rd16(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
 {
 	return nv_rd16(object->engine, addr);
 }
 
 static u32
-nouveau_devobj_rd32(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
 {
 	return nv_rd32(object->engine, addr);
 }
 
 static void
-nouveau_devobj_wr08(struct nouveau_object *object, u32 addr, u8 data)
+nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
 {
 	nv_wr08(object->engine, addr, data);
 }
 
 static void
-nouveau_devobj_wr16(struct nouveau_object *object, u32 addr, u16 data)
+nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
 {
 	nv_wr16(object->engine, addr, data);
 }
 
 static void
-nouveau_devobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
 	nv_wr32(object->engine, addr, data);
 }
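
disable_map[] translates each subdev/engine index into the NV_DEVICE_DISABLE_* bit
that masks it out. A hedged sketch of the kind of check the device constructor
performs with it; the loop shape and the args structure are assumptions, as that
code is not shown in this hunk:

    int i;

    for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
            /* skip any subdev the client asked to disable */
            if (args->disable & disable_map[i])
                    continue;
            /* ... otherwise instantiate device->oclass[i] ... */
    }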
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
index f09accfd0e31..9c40b0fb23f6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
@@ -105,7 +105,7 @@ nv10_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv1a_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -159,7 +159,7 @@ nv10_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv1a_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
index 5fa58b7369b5..74f88f48e1c2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
@@ -72,7 +72,7 @@ nv20_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -90,7 +90,7 @@ nv20_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -108,7 +108,7 @@ nv20_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
index 7f4b8fe6cccc..0ac1b2c4f61d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
@@ -72,7 +72,7 @@ nv30_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv35_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -109,7 +109,7 @@ nv30_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv36_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -128,7 +128,7 @@ nv30_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
index 42deadca0f0a..41d59689a021 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
@@ -76,7 +76,7 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -96,7 +96,7 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -116,7 +116,7 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -156,7 +156,7 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv47_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -176,7 +176,7 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv49_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -196,7 +196,7 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv49_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -216,7 +216,7 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv44_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -236,7 +236,7 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -256,7 +256,7 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv44_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -276,7 +276,7 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -296,7 +296,7 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv4e_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -316,7 +316,7 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -336,7 +336,7 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -356,7 +356,7 @@ nv40_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
index fec3bcc9a6fc..6ccfd8585ba2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
@@ -98,7 +98,7 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
 		break;
 	case 0x86:
 		device->cname = "G86";
@@ -123,7 +123,7 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
 		break;
 	case 0x92:
 		device->cname = "G92";
@@ -148,7 +148,7 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
 		break;
 	case 0x94:
 		device->cname = "G94";
@@ -173,7 +173,7 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
 		break;
 	case 0x96:
 		device->cname = "G96";
@@ -198,7 +198,7 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
 		break;
 	case 0x98:
 		device->cname = "G98";
@@ -223,7 +223,7 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
 		break;
 	case 0xa0:
 		device->cname = "G200";
@@ -248,7 +248,7 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva0_disp_oclass;
 		break;
 	case 0xaa:
 		device->cname = "MCP77/MCP78";
@@ -273,7 +273,7 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
 		break;
 	case 0xac:
 		device->cname = "MCP79/MCP7A";
@@ -298,7 +298,7 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
 		break;
 	case 0xa3:
 		device->cname = "GT215";
@@ -324,7 +324,7 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
 		break;
 	case 0xa5:
 		device->cname = "GT216";
@@ -349,7 +349,7 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
 		break;
 	case 0xa8:
 		device->cname = "GT218";
@@ -374,7 +374,7 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
 		break;
 	case 0xaf:
 		device->cname = "MCP89";
@@ -399,7 +399,7 @@ nv50_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
 		break;
 	default:
 		nv_fatal(device, "unknown Tesla chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
index 6697f0f9c293..f0461685a422 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
@@ -74,12 +74,12 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
 		break;
 	case 0xc4:
 		device->cname = "GF104";
@@ -102,12 +102,12 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
 		break;
 	case 0xc3:
 		device->cname = "GF106";
@@ -130,12 +130,12 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
 		break;
 	case 0xce:
 		device->cname = "GF114";
@@ -158,12 +158,12 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
 		break;
 	case 0xcf:
 		device->cname = "GF116";
@@ -186,12 +186,12 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
 		break;
 	case 0xc1:
 		device->cname = "GF108";
@@ -214,12 +214,12 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
 		break;
 	case 0xc8:
 		device->cname = "GF110";
@@ -242,12 +242,12 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
 		break;
 	case 0xd9:
 		device->cname = "GF119";
@@ -266,13 +266,13 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
 		break;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
index 4a280b7ab853..9b7881e76634 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
@@ -45,6 +45,9 @@
 #include <engine/graph.h>
 #include <engine/disp.h>
 #include <engine/copy.h>
+#include <engine/bsp.h>
+#include <engine/vp.h>
+#include <engine/ppp.h>
 
 int
 nve0_identify(struct nouveau_device *device)
@@ -67,13 +70,16 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nve0_graph_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		break;
 	case 0xe7:
 		device->cname = "GK107";
@@ -92,13 +98,16 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nve0_graph_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		break;
 	default:
 		nv_fatal(device, "unknown Kepler chipset\n");
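
The three identify tables above all follow the same pattern: each chipset
case fills device->oclass[] with the implementation class matching its
hardware generation, so the pre-G94/G94/GT215 display splits (and the
Fermi/Kepler VP/BSP/PPP updates) land as plain table entries rather than
runtime checks. A minimal sketch of that dispatch shape, using simplified
stand-in types (the real nouveau_oclass carries constructors and method
tables):

  /* Simplified stand-ins; not the real nouveau_oclass machinery. */
  struct oclass { const char *name; };

  static const struct oclass nv84_disp = { "nv84 display" };
  static const struct oclass nv94_disp = { "nv94 display" };

  static const struct oclass *
  pick_disp(int chipset)
  {
          switch (chipset) {
          case 0x84: case 0x86: case 0x92:
                  return &nv84_disp;
          default:
                  return &nv94_disp;
          }
  }
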
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
index 61becfa732e9..ae7249b09797 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -22,6 +22,10 @@
  * Authors: Ben Skeggs
  */
 
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
 #include <subdev/devinit.h>
 #include <subdev/vga.h>
 
@@ -55,7 +59,12 @@ nv50_devinit_dtor(struct nouveau_object *object)
 static int
 nv50_devinit_init(struct nouveau_object *object)
 {
+	struct nouveau_bios *bios = nouveau_bios(object);
 	struct nv50_devinit_priv *priv = (void *)object;
+	struct nvbios_outp info;
+	struct dcb_output outp;
+	u8  ver = 0xff, hdr, cnt, len;
+	int ret, i = 0;
 
 	if (!priv->base.post) {
 		if (!nv_rdvgac(priv, 0, 0x00) &&
@@ -65,7 +74,30 @@ nv50_devinit_init(struct nouveau_object *object)
 		}
 	}
 
-	return nouveau_devinit_init(&priv->base);
+	ret = nouveau_devinit_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* if we ran the init tables, execute first script pointer for each
+	 * display table output entry that has a matching dcb entry.
+	 */
+	while (priv->base.post && ver) {
+		u16 data = nvbios_outp_parse(bios, i++, &ver, &hdr, &cnt, &len, &info);
+		if (data && dcb_outp_match(bios, info.type, info.mask, &ver, &len, &outp)) {
+			struct nvbios_init init = {
+				.subdev = nv_subdev(priv),
+				.bios = bios,
+				.offset = info.script[0],
+				.outp = &outp,
+				.crtc = -1,
+				.execute = 1,
+			};
+
+			nvbios_exec(&init);
+		}
+	}
+
+	return 0;
 }
 
 static int
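
One subtlety in the post-init loop above: 'ver' starts at 0xff and the
while (priv->base.post && ver) condition can only terminate if
nvbios_outp_parse() clears it once the display table runs out of entries;
since 'data' is not part of the loop condition, that must be the parser's
contract. A hedged sketch of that contract with a stand-in parser:

  /* Stand-in for the table-walk contract the loop relies on: while
   * entries remain the parser leaves *ver non-zero and returns the
   * entry offset; past the end of the table it zeroes *ver.
   */
  static u16
  outp_parse_sketch(int idx, int nr_entries, u8 *ver)
  {
          if (idx >= nr_entries) {
                  *ver = 0;                   /* ends while (... && ver) */
                  return 0x0000;
          }
          return 0x0040 + idx * 0x10;         /* hypothetical entry offset */
  }
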
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
index f0086de8af31..d6d16007ec1a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -56,6 +56,68 @@ nouveau_fb_bios_memtype(struct nouveau_bios *bios)
 	return NV_MEM_TYPE_UNKNOWN;
 }
 
+int
+nouveau_fb_preinit(struct nouveau_fb *pfb)
+{
+	static const char *name[] = {
+		[NV_MEM_TYPE_UNKNOWN] = "unknown",
+		[NV_MEM_TYPE_STOLEN ] = "stolen system memory",
+		[NV_MEM_TYPE_SGRAM  ] = "SGRAM",
+		[NV_MEM_TYPE_SDRAM  ] = "SDRAM",
+		[NV_MEM_TYPE_DDR1   ] = "DDR1",
+		[NV_MEM_TYPE_DDR2   ] = "DDR2",
+		[NV_MEM_TYPE_DDR3   ] = "DDR3",
+		[NV_MEM_TYPE_GDDR2  ] = "GDDR2",
+		[NV_MEM_TYPE_GDDR3  ] = "GDDR3",
+		[NV_MEM_TYPE_GDDR4  ] = "GDDR4",
+		[NV_MEM_TYPE_GDDR5  ] = "GDDR5",
+	};
+	int ret, tags;
+
+	tags = pfb->ram.init(pfb);
+	if (tags < 0 || !pfb->ram.size) {
+		nv_fatal(pfb, "error detecting memory configuration!!\n");
+		return (tags < 0) ? tags : -ERANGE;
+	}
+
+	if (!nouveau_mm_initialised(&pfb->vram)) {
+		ret = nouveau_mm_init(&pfb->vram, 0, pfb->ram.size >> 12, 1);
+		if (ret)
+			return ret;
+	}
+
+	if (!nouveau_mm_initialised(&pfb->tags) && tags) {
+		ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1);
+		if (ret)
+			return ret;
+	}
+
+	nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
+	nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
+	nv_info(pfb, "   ZCOMP: %d tags\n", tags);
+	return 0;
+}
+
+void
+nouveau_fb_destroy(struct nouveau_fb *pfb)
+{
+	int i;
+
+	for (i = 0; i < pfb->tile.regions; i++)
+		pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
+	nouveau_mm_fini(&pfb->tags);
+	nouveau_mm_fini(&pfb->vram);
+
+	nouveau_subdev_destroy(&pfb->base);
+}
+
+void
+_nouveau_fb_dtor(struct nouveau_object *object)
+{
+	struct nouveau_fb *pfb = (void *)object;
+	nouveau_fb_destroy(pfb);
+}
+
 int
 nouveau_fb_init(struct nouveau_fb *pfb)
 {
@@ -77,54 +138,3 @@ _nouveau_fb_init(struct nouveau_object *object)
 	struct nouveau_fb *pfb = (void *)object;
 	return nouveau_fb_init(pfb);
 }
-
-void
-nouveau_fb_destroy(struct nouveau_fb *pfb)
-{
-	int i;
-
-	for (i = 0; i < pfb->tile.regions; i++)
-		pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
-
-	if (pfb->tags.block_size)
-		nouveau_mm_fini(&pfb->tags);
-
-	if (pfb->vram.block_size)
-		nouveau_mm_fini(&pfb->vram);
-
-	nouveau_subdev_destroy(&pfb->base);
-}
-
-void
-_nouveau_fb_dtor(struct nouveau_object *object)
-{
-	struct nouveau_fb *pfb = (void *)object;
-	nouveau_fb_destroy(pfb);
-}
-
-int
-nouveau_fb_created(struct nouveau_fb *pfb)
-{
-	static const char *name[] = {
-		[NV_MEM_TYPE_UNKNOWN] = "unknown",
-		[NV_MEM_TYPE_STOLEN ] = "stolen system memory",
-		[NV_MEM_TYPE_SGRAM  ] = "SGRAM",
-		[NV_MEM_TYPE_SDRAM  ] = "SDRAM",
-		[NV_MEM_TYPE_DDR1   ] = "DDR1",
-		[NV_MEM_TYPE_DDR2   ] = "DDR2",
-		[NV_MEM_TYPE_DDR3   ] = "DDR3",
-		[NV_MEM_TYPE_GDDR2  ] = "GDDR2",
-		[NV_MEM_TYPE_GDDR3  ] = "GDDR3",
-		[NV_MEM_TYPE_GDDR4  ] = "GDDR4",
-		[NV_MEM_TYPE_GDDR5  ] = "GDDR5",
-	};
-
-	if (pfb->ram.size == 0) {
-		nv_fatal(pfb, "no vram detected!!\n");
-		return -ERANGE;
-	}
-
-	nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
-	nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
-	return 0;
-}
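
This is the core of the fb refactor: nouveau_fb_created() is replaced by
nouveau_fb_preinit(), which calls a per-chipset ram.init hook to detect
RAM type, size and partition count, and uses the hook's return value to
size the vram and compression-tag allocators (negative errno on failure,
otherwise the ZCOMP tag count, zero meaning the chipset has none). A
minimal sketch of a backend under those assumptions, with placeholder
values where a real implementation would decode strap registers:

  /* Sketch only: the assignments stand in for real register reads. */
  static int
  nvxx_fb_vram_init(struct nouveau_fb *pfb)
  {
          pfb->ram.type  = NV_MEM_TYPE_DDR2;      /* e.g. from a strap reg */
          pfb->ram.size  = 256 * 1024 * 1024;     /* e.g. from 0x10020c */
          pfb->ram.parts = 1;                     /* e.g. from 0x100200 */
          return 0;                               /* no ZCOMP tag memory */
  }

The hook is then wired up from the chipset constructor, exactly as the
nv04-nv49 files below do:

  priv->base.ram.init = nvxx_fb_vram_init;
  return nouveau_fb_preinit(&priv->base);
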
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
index eb06836b69f7..6e369f85361e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
@@ -55,6 +55,37 @@ nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
 	return false;
 }
 
+static int
+nv04_fb_vram_init(struct nouveau_fb *pfb)
+{
+	u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0);
+	if (boot0 & 0x00000100) {
+		pfb->ram.size  = ((boot0 >> 12) & 0xf) * 2 + 2;
+		pfb->ram.size *= 1024 * 1024;
+	} else {
+		switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
+		case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
+			pfb->ram.size = 32 * 1024 * 1024;
+			break;
+		case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
+			pfb->ram.size = 16 * 1024 * 1024;
+			break;
+		case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
+			pfb->ram.size = 8 * 1024 * 1024;
+			break;
+		case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
+			pfb->ram.size = 4 * 1024 * 1024;
+			break;
+		}
+	}
+
+	if ((boot0 & 0x00000038) <= 0x10)
+		pfb->ram.type = NV_MEM_TYPE_SGRAM;
+	else
+		pfb->ram.type = NV_MEM_TYPE_SDRAM;
+	return 0;
+}
+
 static int
 nv04_fb_init(struct nouveau_object *object)
 {
@@ -79,7 +110,6 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_object **pobject)
 {
 	struct nv04_fb_priv *priv;
-	u32 boot0;
 	int ret;
 
 	ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -87,35 +117,9 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	boot0 = nv_rd32(priv, NV04_PFB_BOOT_0);
-	if (boot0 & 0x00000100) {
-		priv->base.ram.size  = ((boot0 >> 12) & 0xf) * 2 + 2;
-		priv->base.ram.size *= 1024 * 1024;
-	} else {
-		switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
-		case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
-			priv->base.ram.size = 32 * 1024 * 1024;
-			break;
-		case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
-			priv->base.ram.size = 16 * 1024 * 1024;
-			break;
-		case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
-			priv->base.ram.size = 8 * 1024 * 1024;
-			break;
-		case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
-			priv->base.ram.size = 4 * 1024 * 1024;
-			break;
-		}
-	}
-
-	if ((boot0 & 0x00000038) <= 0x10)
-		priv->base.ram.type = NV_MEM_TYPE_SGRAM;
-	else
-		priv->base.ram.type = NV_MEM_TYPE_SDRAM;
-
-
 	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	return nouveau_fb_created(&priv->base);
+	priv->base.ram.init = nv04_fb_vram_init;
+	return nouveau_fb_preinit(&priv->base);
 }
 
 struct nouveau_oclass
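
For the boot0-based path in nv04_fb_vram_init(): when bit 8 is set, the
field in bits 15:12 encodes the RAM size in 2 MiB steps with a 2 MiB
base. A worked example with a hypothetical register value:

  u32 boot0 = 0x00003100;                    /* bit 8 set, field = 3 */
  u32 mib   = ((boot0 >> 12) & 0xf) * 2 + 2; /* 3*2 + 2 = 8 -> 8 MiB */
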
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
index f037a422d2f4..edbbe26e858d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
@@ -30,7 +30,20 @@ struct nv10_fb_priv {
 	struct nouveau_fb base;
 };
 
-static void
+static int
+nv10_fb_vram_init(struct nouveau_fb *pfb)
+{
+	u32 cfg0 = nv_rd32(pfb, 0x100200);
+	if (cfg0 & 0x00000001)
+		pfb->ram.type = NV_MEM_TYPE_DDR1;
+	else
+		pfb->ram.type = NV_MEM_TYPE_SDRAM;
+
+	pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	return 0;
+}
+
+void
 nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nouveau_fb_tile *tile)
 {
@@ -39,7 +52,7 @@ nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
 	tile->pitch = pitch;
 }
 
-static void
+void
 nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 {
 	tile->addr  = 0;
@@ -54,6 +67,7 @@ nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 	nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
 	nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
 	nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+	nv_rd32(pfb, 0x100240 + (i * 0x10));
 }
 
 static int
@@ -61,7 +75,6 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_oclass *oclass, void *data, u32 size,
 	     struct nouveau_object **pobject)
 {
-	struct nouveau_device *device = nv_device(parent);
 	struct nv10_fb_priv *priv;
 	int ret;
 
@@ -70,42 +83,13 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	if (device->chipset == 0x1a ||  device->chipset == 0x1f) {
-		struct pci_dev *bridge;
-		u32 mem, mib;
-
-		bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
-		if (!bridge) {
-			nv_fatal(device, "no bridge device\n");
-			return 0;
-		}
-
-		if (device->chipset == 0x1a) {
-			pci_read_config_dword(bridge, 0x7c, &mem);
-			mib = ((mem >> 6) & 31) + 1;
-		} else {
-			pci_read_config_dword(bridge, 0x84, &mem);
-			mib = ((mem >> 4) & 127) + 1;
-		}
-
-		priv->base.ram.type = NV_MEM_TYPE_STOLEN;
-		priv->base.ram.size = mib * 1024 * 1024;
-	} else {
-		u32 cfg0 = nv_rd32(priv, 0x100200);
-		if (cfg0 & 0x00000001)
-			priv->base.ram.type = NV_MEM_TYPE_DDR1;
-		else
-			priv->base.ram.type = NV_MEM_TYPE_SDRAM;
-
-		priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-	}
-
 	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv10_fb_vram_init;
 	priv->base.tile.regions = 8;
 	priv->base.tile.init = nv10_fb_tile_init;
 	priv->base.tile.fini = nv10_fb_tile_fini;
 	priv->base.tile.prog = nv10_fb_tile_prog;
-	return nouveau_fb_created(&priv->base);
+	return nouveau_fb_preinit(&priv->base);
 }
 
 struct nouveau_oclass
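
Note the nv_rd32() that nv10_fb_tile_prog() now issues after its final
nv_wr32(): MMIO writes may be posted (buffered) on the bus, and reading a
register back over the same path flushes them, so the tile configuration
is guaranteed to have reached the chip before the helper returns. The
idiom, annotated:

  nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
  (void)nv_rd32(pfb, 0x100240 + (i * 0x10));   /* posting read: flush */
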
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
new file mode 100644
index 000000000000..48366841db4a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv1a_fb_priv {
+	struct nouveau_fb base;
+};
+
+static int
+nv1a_fb_vram_init(struct nouveau_fb *pfb)
+{
+	struct pci_dev *bridge;
+	u32 mem, mib;
+
+	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
+	if (!bridge) {
+		nv_fatal(pfb, "no bridge device\n");
+		return -ENODEV;
+	}
+
+	if (nv_device(pfb)->chipset == 0x1a) {
+		pci_read_config_dword(bridge, 0x7c, &mem);
+		mib = ((mem >> 6) & 31) + 1;
+	} else {
+		pci_read_config_dword(bridge, 0x84, &mem);
+		mib = ((mem >> 4) & 127) + 1;
+	}
+
+	pfb->ram.type = NV_MEM_TYPE_STOLEN;
+	pfb->ram.size = mib * 1024 * 1024;
+	return 0;
+}
+
+static int
+nv1a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv1a_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv1a_fb_vram_init;
+	priv->base.tile.regions = 8;
+	priv->base.tile.init = nv10_fb_tile_init;
+	priv->base.tile.fini = nv10_fb_tile_fini;
+	priv->base.tile.prog = nv10_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv1a_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x1a),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv1a_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = _nouveau_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
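
A worked example of the bridge decode above, using a hypothetical config
value: on a chipset-0x1a board, if the dword read at offset 0x7c is
0x000001c0, then ((0x1c0 >> 6) & 31) = 7, so mib = 8 and the hook reports
8 MiB of stolen system memory.

  u32 mem = 0x000001c0;              /* hypothetical 0x7c readout */
  u32 mib = ((mem >> 6) & 31) + 1;   /* 7 + 1 = 8 -> 8 MiB */
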
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
index 4b3578fcb7fb..5d14612a2c8e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
@@ -30,43 +30,54 @@ struct nv20_fb_priv {
 	struct nouveau_fb base;
 };
 
-static void
+int
+nv20_fb_vram_init(struct nouveau_fb *pfb)
+{
+	u32 pbus1218 = nv_rd32(pfb, 0x001218);
+
+	switch (pbus1218 & 0x00000300) {
+	case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
+	case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000300: pfb->ram.type = NV_MEM_TYPE_GDDR2; break;
+	}
+	pfb->ram.size  = (nv_rd32(pfb, 0x10020c) & 0xff000000);
+	pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+
+	return nv_rd32(pfb, 0x100320);
+}
+
+void
 nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nouveau_fb_tile *tile)
 {
-	struct nouveau_device *device = nv_device(pfb);
-	int bpp = (flags & 2) ? 32 : 16;
-
 	tile->addr  = 0x00000001 | addr;
 	tile->limit = max(1u, addr + size) - 1;
 	tile->pitch = pitch;
-
-	/* Allocate some of the on-die tag memory, used to store Z
-	 * compression meta-data (most likely just a bitmap determining
-	 * if a given tile is compressed or not).
-	 */
-	size /= 256;
 	if (flags & 4) {
-		if (!nouveau_mm_head(&pfb->tags, 1, size, size, 1, &tile->tag)) {
-			/* Enable Z compression */
-			tile->zcomp = tile->tag->offset;
-			if (device->chipset >= 0x25) {
-				if (bpp == 16)
-					tile->zcomp |= 0x00100000;
-				else
-					tile->zcomp |= 0x00200000;
-			} else {
-				tile->zcomp |= 0x80000000;
-				if (bpp != 16)
-					tile->zcomp |= 0x04000000;
-			}
-		}
-
+		pfb->tile.comp(pfb, i, size, flags, tile);
 		tile->addr |= 2;
 	}
 }
 
 static void
+nv20_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+		  struct nouveau_fb_tile *tile)
+{
+	u32 tiles = DIV_ROUND_UP(size, 0x40);
+	u32 tags  = round_up(tiles / pfb->ram.parts, 0x40);
+	if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+		if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
+		else              tile->zcomp = 0x04000000; /* Z24S8 */
+		tile->zcomp |= tile->tag->offset;
+		tile->zcomp |= 0x80000000; /* enable */
+#ifdef __BIG_ENDIAN
+		tile->zcomp |= 0x08000000;
+#endif
+	}
+}
+
+void
 nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 {
 	tile->addr  = 0;
@@ -76,12 +87,13 @@ nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 	nouveau_mm_free(&pfb->tags, &tile->tag);
 }
 
-static void
+void
 nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 {
 	nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
 	nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
 	nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+	nv_rd32(pfb, 0x100240 + (i * 0x10));
 	nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
 }
 
@@ -90,9 +102,7 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_oclass *oclass, void *data, u32 size,
 	     struct nouveau_object **pobject)
 {
-	struct nouveau_device *device = nv_device(parent);
 	struct nv20_fb_priv *priv;
-	u32 pbus1218;
 	int ret;
 
 	ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -100,28 +110,14 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	pbus1218 = nv_rd32(priv, 0x001218);
-	switch (pbus1218 & 0x00000300) {
-	case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
-	case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
-	case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
-	}
-	priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
-	if (device->chipset >= 0x25)
-		ret = nouveau_mm_init(&priv->base.tags, 0, 64 * 1024, 1);
-	else
-		ret = nouveau_mm_init(&priv->base.tags, 0, 32 * 1024, 1);
-	if (ret)
-		return ret;
-
 	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv20_fb_vram_init;
 	priv->base.tile.regions = 8;
 	priv->base.tile.init = nv20_fb_tile_init;
+	priv->base.tile.comp = nv20_fb_tile_comp;
 	priv->base.tile.fini = nv20_fb_tile_fini;
 	priv->base.tile.prog = nv20_fb_tile_prog;
-	return nouveau_fb_created(&priv->base);
+	return nouveau_fb_preinit(&priv->base);
 }
 
 struct nouveau_oclass
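
The comp hooks size their tag allocation from the tile size; a worked
example of the arithmetic, assuming a 16 MiB tile region on a
hypothetical two-partition board:

  u32 size  = 0x1000000;                 /* 16 MiB, example only */
  u32 tiles = DIV_ROUND_UP(size, 0x40);  /* 0x40000: one tag per 64B */
  u32 tags  = round_up(tiles / 2, 0x40); /* ram.parts = 2 -> 0x20000 */
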
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
new file mode 100644
index 000000000000..0042ace6bef9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv25_fb_priv {
+	struct nouveau_fb base;
+};
+
+static void
+nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+		  struct nouveau_fb_tile *tile)
+{
+	u32 tiles = DIV_ROUND_UP(size, 0x40);
+	u32 tags  = round_up(tiles / pfb->ram.parts, 0x40);
+	if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+		if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
+		else              tile->zcomp = 0x00200000; /* Z24S8 */
+		tile->zcomp |= tile->tag->offset;
+#ifdef __BIG_ENDIAN
+		tile->zcomp |= 0x01000000;
+#endif
+	}
+}
+
+static int
+nv25_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv25_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv20_fb_vram_init;
+	priv->base.tile.regions = 8;
+	priv->base.tile.init = nv20_fb_tile_init;
+	priv->base.tile.comp = nv25_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv20_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv25_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x25),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv25_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = _nouveau_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
index cba67bc91390..a7ba0d048aec 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
@@ -34,17 +34,36 @@ void
 nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nouveau_fb_tile *tile)
 {
-	tile->addr = addr | 1;
+	/* for performance, select alternate bank offset for zeta */
+	if (!(flags & 4)) {
+		tile->addr = (0 << 4);
+	} else {
+		if (pfb->tile.comp) /* z compression */
+			pfb->tile.comp(pfb, i, size, flags, tile);
+		tile->addr = (1 << 4);
+	}
+
+	tile->addr |= 0x00000001; /* enable */
+	tile->addr |= addr;
 	tile->limit = max(1u, addr + size) - 1;
 	tile->pitch = pitch;
 }
 
-void
-nv30_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+static void
+nv30_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+		  struct nouveau_fb_tile *tile)
 {
-	tile->addr  = 0;
-	tile->limit = 0;
-	tile->pitch = 0;
+	u32 tiles = DIV_ROUND_UP(size, 0x40);
+	u32 tags  = round_up(tiles / pfb->ram.parts, 0x40);
+	if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+		if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
+		else           tile->zcomp |= 0x02000000; /* Z24S8 */
+		tile->zcomp |= ((tile->tag->offset           ) >> 6);
+		tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 12;
+#ifdef __BIG_ENDIAN
+		tile->zcomp |= 0x10000000;
+#endif
+	}
 }
 
 static int
@@ -72,7 +91,7 @@ calc_ref(struct nv30_fb_priv *priv, int l, int k, int i)
 	return x;
 }
 
-static int
+int
 nv30_fb_init(struct nouveau_object *object)
 {
 	struct nouveau_device *device = nv_device(object);
@@ -111,7 +130,6 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_object **pobject)
 {
 	struct nv30_fb_priv *priv;
-	u32 pbus1218;
 	int ret;
 
 	ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -119,21 +137,14 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	pbus1218 = nv_rd32(priv, 0x001218);
-	switch (pbus1218 & 0x00000300) {
-	case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
-	case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
-	case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
-	}
-	priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
 	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv20_fb_vram_init;
 	priv->base.tile.regions = 8;
 	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.fini = nv30_fb_tile_fini;
-	priv->base.tile.prog = nv10_fb_tile_prog;
-	return nouveau_fb_created(&priv->base);
+	priv->base.tile.comp = nv30_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv20_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
 }
 
 struct nouveau_oclass
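
The tile address words assembled by nv30_fb_tile_init() (and by the nv46
variant later in the series) pack several fields into a single register
value; a sketch of the layout as the code above uses it, where the
bank-select bit position is the per-chipset difference:

  /* addr word as nv30_fb_tile_init() builds it (sketch):
   *   bit 0     - tile region enable
   *   bit 4     - alternate bank offset for zeta (bit 3 on nv46)
   *   remainder - tile base address, OR'd in from 'addr'
   */
  static u32
  nv30_tile_addr_sketch(u32 addr, bool is_zeta)
  {
          u32 v = (is_zeta ? 1 : 0) << 4; /* alternate bank for zeta */
          v |= 0x00000001;                /* enable */
          v |= addr;
          return v;
  }
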
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
new file mode 100644
index 000000000000..092f6f4f3521
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv35_fb_priv {
+	struct nouveau_fb base;
+};
+
+static void
+nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+		  struct nouveau_fb_tile *tile)
+{
+	u32 tiles = DIV_ROUND_UP(size, 0x40);
+	u32 tags  = round_up(tiles / pfb->ram.parts, 0x40);
+	if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+		if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
+		else           tile->zcomp |= 0x08000000; /* Z24S8 */
+		tile->zcomp |= ((tile->tag->offset           ) >> 6);
+		tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 13;
+#ifdef __BIG_ENDIAN
+		tile->zcomp |= 0x40000000;
+#endif
+	}
+}
+
+static int
+nv35_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv35_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv20_fb_vram_init;
+	priv->base.tile.regions = 8;
+	priv->base.tile.init = nv30_fb_tile_init;
+	priv->base.tile.comp = nv35_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv20_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv35_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x35),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv35_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv30_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
new file mode 100644
index 000000000000..797ab3b821b9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv36_fb_priv {
+	struct nouveau_fb base;
+};
+
+static void
+nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+		  struct nouveau_fb_tile *tile)
+{
+	u32 tiles = DIV_ROUND_UP(size, 0x40);
+	u32 tags  = round_up(tiles / pfb->ram.parts, 0x40);
+	if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+		if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
+		else           tile->zcomp |= 0x20000000; /* Z24S8 */
+		tile->zcomp |= ((tile->tag->offset           ) >> 6);
+		tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 14;
+#ifdef __BIG_ENDIAN
+		tile->zcomp |= 0x80000000;
+#endif
+	}
+}
+
+static int
+nv36_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv36_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv20_fb_vram_init;
+	priv->base.tile.regions = 8;
+	priv->base.tile.init = nv30_fb_tile_init;
+	priv->base.tile.comp = nv36_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv20_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv36_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x36),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv36_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv30_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
index 347a496fcad8..65e131b90f37 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
@@ -30,34 +30,37 @@ struct nv40_fb_priv {
 	struct nouveau_fb base;
 };
 
-static inline int
-nv44_graph_class(struct nouveau_device *device)
+static int
+nv40_fb_vram_init(struct nouveau_fb *pfb)
 {
-	if ((device->chipset & 0xf0) == 0x60)
-		return 1;
+	u32 pbus1218 = nv_rd32(pfb, 0x001218);
+	switch (pbus1218 & 0x00000300) {
+	case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
+	case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000300: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
+	}
 
-	return !(0x0baf & (1 << (device->chipset & 0x0f)));
+	pfb->ram.size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
+	pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	return nv_rd32(pfb, 0x100320);
 }
 
-static void
-nv40_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+void
+nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+		  struct nouveau_fb_tile *tile)
 {
-	nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
-	nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
-	nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
-}
-
-static void
-nv40_fb_init_gart(struct nv40_fb_priv *priv)
-{
-	nv_wr32(priv, 0x100800, 0x00000001);
-}
-
-static void
-nv44_fb_init_gart(struct nv40_fb_priv *priv)
-{
-	nv_wr32(priv, 0x100850, 0x80000000);
-	nv_wr32(priv, 0x100800, 0x00000001);
+	u32 tiles = DIV_ROUND_UP(size, 0x80);
+	u32 tags  = round_up(tiles / pfb->ram.parts, 0x100);
+	if ( (flags & 2) &&
+	    !nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+		tile->zcomp  = 0x28000000; /* Z24S8_SPLIT_GRAD */
+		tile->zcomp |= ((tile->tag->offset           ) >> 8);
+		tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
+#ifdef __BIG_ENDIAN
+		tile->zcomp |= 0x40000000;
+#endif
+	}
 }
 
 static int
@@ -70,19 +73,7 @@ nv40_fb_init(struct nouveau_object *object)
 	if (ret)
 		return ret;
 
-	switch (nv_device(priv)->chipset) {
-	case 0x40:
-	case 0x45:
-		nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
-		break;
-	default:
-		if (nv44_graph_class(nv_device(priv)))
-			nv44_fb_init_gart(priv);
-		else
-			nv40_fb_init_gart(priv);
-		break;
-	}
-
+	nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
 	return 0;
 }
 
@@ -91,7 +82,6 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_oclass *oclass, void *data, u32 size,
 	     struct nouveau_object **pobject)
 {
-	struct nouveau_device *device = nv_device(parent);
 	struct nv40_fb_priv *priv;
 	int ret;
 
@@ -100,69 +90,14 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	/* 0x001218 is actually present on a few other NV4X I looked at,
-	 * and even contains sane values matching 0x100474.  From looking
-	 * at various vbios images however, this isn't the case everywhere.
-	 * So, I chose to use the same regs I've seen NVIDIA reading around
-	 * the memory detection, hopefully that'll get us the right numbers
-	 */
-	if (device->chipset == 0x40) {
-		u32 pbus1218 = nv_rd32(priv, 0x001218);
-		switch (pbus1218 & 0x00000300) {
-		case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
-		case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
-		case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
-		case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
-		}
-	} else
-	if (device->chipset == 0x49 || device->chipset == 0x4b) {
-		u32 pfb914 = nv_rd32(priv, 0x100914);
-		switch (pfb914 & 0x00000003) {
-		case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
-		case 0x00000001: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
-		case 0x00000002: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
-		case 0x00000003: break;
-		}
-	} else
-	if (device->chipset != 0x4e) {
-		u32 pfb474 = nv_rd32(priv, 0x100474);
-		if (pfb474 & 0x00000004)
-			priv->base.ram.type = NV_MEM_TYPE_GDDR3;
-		if (pfb474 & 0x00000002)
-			priv->base.ram.type = NV_MEM_TYPE_DDR2;
-		if (pfb474 & 0x00000001)
-			priv->base.ram.type = NV_MEM_TYPE_DDR1;
-	} else {
-		priv->base.ram.type = NV_MEM_TYPE_STOLEN;
-	}
-
-	priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
 	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	switch (device->chipset) {
-	case 0x40:
-	case 0x45:
-		priv->base.tile.regions = 8;
-		break;
-	case 0x46:
-	case 0x47:
-	case 0x49:
-	case 0x4b:
-	case 0x4c:
-		priv->base.tile.regions = 15;
-		break;
-	default:
-		priv->base.tile.regions = 12;
-		break;
-	}
+	priv->base.ram.init = nv40_fb_vram_init;
+	priv->base.tile.regions = 8;
 	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.fini = nv30_fb_tile_fini;
-	if (device->chipset == 0x40)
-		priv->base.tile.prog = nv10_fb_tile_prog;
-	else
-		priv->base.tile.prog = nv40_fb_tile_prog;
-
-	return nouveau_fb_created(&priv->base);
+	priv->base.tile.comp = nv40_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv20_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
 }
 
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
new file mode 100644
index 000000000000..e9e5a08c41a1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv41_fb_priv {
+	struct nouveau_fb base;
+};
+
+int
+nv41_fb_vram_init(struct nouveau_fb *pfb)
+{
+	u32 pfb474 = nv_rd32(pfb, 0x100474);
+	if (pfb474 & 0x00000004)
+		pfb->ram.type = NV_MEM_TYPE_GDDR3;
+	if (pfb474 & 0x00000002)
+		pfb->ram.type = NV_MEM_TYPE_DDR2;
+	if (pfb474 & 0x00000001)
+		pfb->ram.type = NV_MEM_TYPE_DDR1;
+
+	pfb->ram.size =   nv_rd32(pfb, 0x10020c) & 0xff000000;
+	pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	return nv_rd32(pfb, 0x100320);
+}
+
+void
+nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+	nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
+	nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
+	nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
+	nv_rd32(pfb, 0x100600 + (i * 0x10));
+	nv_wr32(pfb, 0x100700 + (i * 0x04), tile->zcomp);
+}
+
+int
+nv41_fb_init(struct nouveau_object *object)
+{
+	struct nv41_fb_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fb_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x100800, 0x00000001);
+	return 0;
+}
+
+static int
+nv41_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv41_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv41_fb_vram_init;
+	priv->base.tile.regions = 12;
+	priv->base.tile.init = nv30_fb_tile_init;
+	priv->base.tile.comp = nv40_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv41_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv41_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x41),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv41_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv41_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
new file mode 100644
index 000000000000..ae89b5006f7a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv44_fb_priv {
+	struct nouveau_fb base;
+};
+
+int
+nv44_fb_vram_init(struct nouveau_fb *pfb)
+{
+	u32 pfb474 = nv_rd32(pfb, 0x100474);
+	if (pfb474 & 0x00000004)
+		pfb->ram.type = NV_MEM_TYPE_GDDR3;
+	if (pfb474 & 0x00000002)
+		pfb->ram.type = NV_MEM_TYPE_DDR2;
+	if (pfb474 & 0x00000001)
+		pfb->ram.type = NV_MEM_TYPE_DDR1;
+
+	pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	return 0;
+}
+
+static void
+nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+		  u32 flags, struct nouveau_fb_tile *tile)
+{
+	tile->addr  = 0x00000001; /* mode = vram */
+	tile->addr |= addr;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+}
+
+void
+nv44_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+	nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
+	nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
+	nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
+	nv_rd32(pfb, 0x100600 + (i * 0x10));
+}
+
+int
+nv44_fb_init(struct nouveau_object *object)
+{
+	struct nv44_fb_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fb_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x100850, 0x80000000);
+	nv_wr32(priv, 0x100800, 0x00000001);
+	return 0;
+}
+
+static int
+nv44_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv44_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv44_fb_vram_init;
+	priv->base.tile.regions = 12;
+	priv->base.tile.init = nv44_fb_tile_init;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv44_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv44_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x44),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv44_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv44_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
new file mode 100644
index 000000000000..589b93ea2994
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv46_fb_priv {
+	struct nouveau_fb base;
+};
+
+void
+nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+		  u32 flags, struct nouveau_fb_tile *tile)
+{
+	/* for performance, select alternate bank offset for zeta */
+	if (!(flags & 4)) tile->addr = (0 << 3);
+	else              tile->addr = (1 << 3);
+
+	tile->addr |= 0x00000001; /* mode = vram */
+	tile->addr |= addr;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+}
+
+static int
+nv46_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv46_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv44_fb_vram_init;
+	priv->base.tile.regions = 15;
+	priv->base.tile.init = nv46_fb_tile_init;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv44_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv46_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x46),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv46_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv44_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
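
As a stand-alone illustration of the packing done by nv46_fb_tile_init above: bit 0 of tile->addr selects "mode = vram", bit 3 picks the alternate bank offset (which the comment associates with zeta surfaces) depending on bit 2 of the flags, and the base address occupies the remaining bits. TILE_FLAG_ZETA, its assumed meaning, and the sample values are inventions for this demo; the max(1u, addr + size) - 1 computation guards the degenerate addr + size == 0 case.

#include <stdint.h>
#include <stdio.h>

#define TILE_FLAG_ZETA 0x4	/* assumed meaning of (flags & 4) */

static uint32_t pack_tile_addr(uint32_t addr, uint32_t flags)
{
	/* alternate bank offset (bit 3), per the comment above */
	uint32_t v = (flags & TILE_FLAG_ZETA) ? (1 << 3) : (0 << 3);

	v |= 0x00000001;	/* mode = vram */
	return v | addr;	/* base address in the remaining bits */
}

int main(void)
{
	uint32_t addr = 0x00200000, size = 0x00100000;
	uint32_t end = addr + size;
	uint32_t limit = (end > 1 ? end : 1) - 1;	/* max(1u, end) - 1 */

	printf("color: 0x%08x limit 0x%08x\n", pack_tile_addr(addr, 0), limit);
	printf("zeta:  0x%08x limit 0x%08x\n",
	       pack_tile_addr(addr, TILE_FLAG_ZETA), limit);
	return 0;
}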
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
new file mode 100644
index 000000000000..818bba35b368
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv47_fb_priv {
+	struct nouveau_fb base;
+};
+
+static int
+nv47_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv47_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv41_fb_vram_init;
+	priv->base.tile.regions = 15;
+	priv->base.tile.init = nv30_fb_tile_init;
+	priv->base.tile.comp = nv40_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv41_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv47_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x47),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv47_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv41_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
new file mode 100644
index 000000000000..84a31af16ab4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv49_fb_priv {
+	struct nouveau_fb base;
+};
+
+static int
+nv49_fb_vram_init(struct nouveau_fb *pfb)
+{
+	u32 pfb914 = nv_rd32(pfb, 0x100914);
+
+	switch (pfb914 & 0x00000003) {
+	case 0x00000000: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000001: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
+	case 0x00000002: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000003: break;
+	}
+
+	pfb->ram.size =   nv_rd32(pfb, 0x10020c) & 0xff000000;
+	pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	return nv_rd32(pfb, 0x100320);
+}
+
+static int
+nv49_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv49_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv49_fb_vram_init;
+	priv->base.tile.regions = 15;
+	priv->base.tile.init = nv30_fb_tile_init;
+	priv->base.tile.comp = nv40_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv41_fb_tile_prog;
+
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv49_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x49),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv49_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv41_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
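
For reference, a stand-alone sketch of the nv49 strap decode above, using invented register readouts: the low two bits of 0x100914 select DDR1/DDR2/GDDR3 (value 3 leaves the type untouched in the driver), the top byte of 0x10020c gives the VRAM size, and the low two bits of 0x100200 plus one give the partition count.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pfb914 = 0x00000002;	/* hypothetical 0x100914 readout */
	uint32_t r20c   = 0x10000000;	/* hypothetical 0x10020c readout */
	uint32_t r200   = 0x00000001;	/* hypothetical 0x100200 readout */
	static const char *types[] = { "DDR1", "DDR2", "GDDR3", "unset" };

	printf("type : %s\n", types[pfb914 & 0x00000003]);
	printf("size : %u MiB\n", (r20c & 0xff000000) >> 20);
	printf("parts: %u\n", (r200 & 0x00000003) + 1);
	return 0;
}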
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
new file mode 100644
index 000000000000..797fd558170b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv4e_fb_priv {
+	struct nouveau_fb base;
+};
+
+static int
+nv4e_fb_vram_init(struct nouveau_fb *pfb)
+{
+	pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	pfb->ram.type = NV_MEM_TYPE_STOLEN;
+	return 0;
+}
+
+static int
+nv4e_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv4e_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv4e_fb_vram_init;
+	priv->base.tile.regions = 12;
+	priv->base.tile.init = nv46_fb_tile_init;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv44_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv4e_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x4e),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv4e_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv44_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index 5f570806143a..487cb8c6c204 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -51,6 +51,101 @@ nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
 	return types[(memtype & 0xff00) >> 8] != 0;
 }
 
+static u32
+nv50_fb_vram_rblock(struct nouveau_fb *pfb)
+{
+	int i, parts, colbits, rowbitsa, rowbitsb, banks;
+	u64 rowsize, predicted;
+	u32 r0, r4, rt, ru, rblock_size;
+
+	r0 = nv_rd32(pfb, 0x100200);
+	r4 = nv_rd32(pfb, 0x100204);
+	rt = nv_rd32(pfb, 0x100250);
+	ru = nv_rd32(pfb, 0x001540);
+	nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+	for (i = 0, parts = 0; i < 8; i++) {
+		if (ru & (0x00010000 << i))
+			parts++;
+	}
+
+	colbits  =  (r4 & 0x0000f000) >> 12;
+	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+	banks    = 1 << (((r4 & 0x03000000) >> 24) + 2);
+
+	rowsize = parts * banks * (1 << colbits) * 8;
+	predicted = rowsize << rowbitsa;
+	if (r0 & 0x00000004)
+		predicted += rowsize << rowbitsb;
+
+	if (predicted != pfb->ram.size) {
+		nv_warn(pfb, "memory controller reports %d MiB VRAM\n",
+			(u32)(pfb->ram.size >> 20));
+	}
+
+	rblock_size = rowsize;
+	if (rt & 1)
+		rblock_size *= 3;
+
+	nv_debug(pfb, "rblock %d bytes\n", rblock_size);
+	return rblock_size;
+}
+
+static int
+nv50_fb_vram_init(struct nouveau_fb *pfb)
+{
+	struct nouveau_device *device = nv_device(pfb);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+	u32 size;
+	int ret;
+
+	pfb->ram.size = nv_rd32(pfb, 0x10020c);
+	pfb->ram.size = (pfb->ram.size & 0xffffff00) |
+		       ((pfb->ram.size & 0x000000ff) << 32);
+
+	size = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
+	switch (device->chipset) {
+	case 0xaa:
+	case 0xac:
+	case 0xaf: /* IGPs, no reordering, no real VRAM */
+		ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1);
+		if (ret)
+			return ret;
+
+		pfb->ram.type   = NV_MEM_TYPE_STOLEN;
+		pfb->ram.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+		break;
+	default:
+		switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
+		case 0: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+		case 1:
+			if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
+				pfb->ram.type = NV_MEM_TYPE_DDR3;
+			else
+				pfb->ram.type = NV_MEM_TYPE_DDR2;
+			break;
+		case 2: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+		case 3: pfb->ram.type = NV_MEM_TYPE_GDDR4; break;
+		case 4: pfb->ram.type = NV_MEM_TYPE_GDDR5; break;
+		default:
+			break;
+		}
+
+		ret = nouveau_mm_init(&pfb->vram, rsvd_head, size,
+				      nv50_fb_vram_rblock(pfb) >> 12);
+		if (ret)
+			return ret;
+
+		pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
+		break;
+	}
+
+	return nv_rd32(pfb, 0x100320);
+}
+
 static int
 nv50_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
 		 u32 memtype, struct nouveau_mem **pmem)
@@ -140,195 +235,6 @@ nv50_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
 	kfree(mem);
 }
 
-static u32
-nv50_vram_rblock(struct nv50_fb_priv *priv)
-{
-	int i, parts, colbits, rowbitsa, rowbitsb, banks;
-	u64 rowsize, predicted;
-	u32 r0, r4, rt, ru, rblock_size;
-
-	r0 = nv_rd32(priv, 0x100200);
-	r4 = nv_rd32(priv, 0x100204);
-	rt = nv_rd32(priv, 0x100250);
-	ru = nv_rd32(priv, 0x001540);
-	nv_debug(priv, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
-
-	for (i = 0, parts = 0; i < 8; i++) {
-		if (ru & (0x00010000 << i))
-			parts++;
-	}
-
-	colbits  =  (r4 & 0x0000f000) >> 12;
-	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
-	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
-	banks    = 1 << (((r4 & 0x03000000) >> 24) + 2);
-
-	rowsize = parts * banks * (1 << colbits) * 8;
-	predicted = rowsize << rowbitsa;
-	if (r0 & 0x00000004)
-		predicted += rowsize << rowbitsb;
-
-	if (predicted != priv->base.ram.size) {
-		nv_warn(priv, "memory controller reports %d MiB VRAM\n",
-			(u32)(priv->base.ram.size >> 20));
-	}
-
-	rblock_size = rowsize;
-	if (rt & 1)
-		rblock_size *= 3;
-
-	nv_debug(priv, "rblock %d bytes\n", rblock_size);
-	return rblock_size;
-}
-
-static int
-nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nouveau_device *device = nv_device(parent);
-	struct nouveau_bios *bios = nouveau_bios(device);
-	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
-	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-	struct nv50_fb_priv *priv;
-	u32 tags;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	switch (nv_rd32(priv, 0x100714) & 0x00000007) {
-	case 0: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
-	case 1:
-		if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
-			priv->base.ram.type = NV_MEM_TYPE_DDR3;
-		else
-			priv->base.ram.type = NV_MEM_TYPE_DDR2;
-		break;
-	case 2: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
-	case 3: priv->base.ram.type = NV_MEM_TYPE_GDDR4; break;
-	case 4: priv->base.ram.type = NV_MEM_TYPE_GDDR5; break;
-	default:
-		break;
-	}
-
-	priv->base.ram.size = nv_rd32(priv, 0x10020c);
-	priv->base.ram.size = (priv->base.ram.size & 0xffffff00) |
-			     ((priv->base.ram.size & 0x000000ff) << 32);
-
-	tags = nv_rd32(priv, 0x100320);
-	ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1);
-	if (ret)
-		return ret;
-
-	nv_debug(priv, "%d compression tags\n", tags);
-
-	size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
-	switch (device->chipset) {
-	case 0xaa:
-	case 0xac:
-	case 0xaf: /* IGPs, no reordering, no real VRAM */
-		ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size, 1);
-		if (ret)
-			return ret;
-
-		priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12;
-		priv->base.ram.type = NV_MEM_TYPE_STOLEN;
-		break;
-	default:
-		ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size,
-				      nv50_vram_rblock(priv) >> 12);
-		if (ret)
-			return ret;
-
-		priv->base.ram.ranks = (nv_rd32(priv, 0x100200) & 0x4) ? 2 : 1;
-		break;
-	}
-
-	priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (priv->r100c08_page) {
-		priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
-					     0, PAGE_SIZE,
-					     PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(device->pdev, priv->r100c08))
-			nv_warn(priv, "failed 0x100c08 page map\n");
-	} else {
-		nv_warn(priv, "failed 0x100c08 page alloc\n");
-	}
-
-	priv->base.memtype_valid = nv50_fb_memtype_valid;
-	priv->base.ram.get = nv50_fb_vram_new;
-	priv->base.ram.put = nv50_fb_vram_del;
-	return nouveau_fb_created(&priv->base);
-}
-
-static void
-nv50_fb_dtor(struct nouveau_object *object)
-{
-	struct nouveau_device *device = nv_device(object);
-	struct nv50_fb_priv *priv = (void *)object;
-
-	if (priv->r100c08_page) {
-		pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
-			       PCI_DMA_BIDIRECTIONAL);
-		__free_page(priv->r100c08_page);
-	}
-
-	nouveau_fb_destroy(&priv->base);
-}
-
-static int
-nv50_fb_init(struct nouveau_object *object)
-{
-	struct nouveau_device *device = nv_device(object);
-	struct nv50_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nouveau_fb_init(&priv->base);
-	if (ret)
-		return ret;
-
-	/* Not a clue what this is exactly.  Without pointing it at a
-	 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
-	 * cause IOMMU "read from address 0" errors (rh#561267)
-	 */
-	nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
-
-	/* This is needed to get meaningful information from 100c90
-	 * on traps. No idea what these values mean exactly. */
-	switch (device->chipset) {
-	case 0x50:
-		nv_wr32(priv, 0x100c90, 0x000707ff);
-		break;
-	case 0xa3:
-	case 0xa5:
-	case 0xa8:
-		nv_wr32(priv, 0x100c90, 0x000d0fff);
-		break;
-	case 0xaf:
-		nv_wr32(priv, 0x100c90, 0x089d1fff);
-		break;
-	default:
-		nv_wr32(priv, 0x100c90, 0x001d07ff);
-		break;
-	}
-
-	return 0;
-}
-
-struct nouveau_oclass
-nv50_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv50_fb_ctor,
-		.dtor = nv50_fb_dtor,
-		.init = nv50_fb_init,
-		.fini = _nouveau_fb_fini,
-	},
-};
-
 static const struct nouveau_enum vm_dispatch_subclients[] = {
 	{ 0x00000000, "GRCTX", NULL },
 	{ 0x00000001, "NOTIFY", NULL },
@@ -424,11 +330,11 @@ static const struct nouveau_enum vm_fault[] = {
 	{}
 };
 
-void
-nv50_fb_trap(struct nouveau_fb *pfb, int display)
+static void
+nv50_fb_intr(struct nouveau_subdev *subdev)
 {
-	struct nouveau_device *device = nv_device(pfb);
-	struct nv50_fb_priv *priv = (void *)pfb;
+	struct nouveau_device *device = nv_device(subdev);
+	struct nv50_fb_priv *priv = (void *)subdev;
 	const struct nouveau_enum *en, *cl;
 	u32 trap[6], idx, chan;
 	u8 st0, st1, st2, st3;
@@ -445,9 +351,6 @@ nv50_fb_trap(struct nouveau_fb *pfb, int display)
 	}
 	nv_wr32(priv, 0x100c90, idx | 0x80000000);
 
-	if (!display)
-		return;
-
 	/* decode status bits into something more useful */
 	if (device->chipset  < 0xa3 ||
 	    device->chipset == 0xaa || device->chipset == 0xac) {
@@ -494,3 +397,101 @@ nv50_fb_trap(struct nouveau_fb *pfb, int display)
 	else
 		printk("0x%08x\n", st1);
 }
+
+static int
+nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nv50_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (priv->r100c08_page) {
+		priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
+					     0, PAGE_SIZE,
+					     PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(device->pdev, priv->r100c08))
+			nv_warn(priv, "failed 0x100c08 page map\n");
+	} else {
+		nv_warn(priv, "failed 0x100c08 page alloc\n");
+	}
+
+	priv->base.memtype_valid = nv50_fb_memtype_valid;
+	priv->base.ram.init = nv50_fb_vram_init;
+	priv->base.ram.get = nv50_fb_vram_new;
+	priv->base.ram.put = nv50_fb_vram_del;
+	nv_subdev(priv)->intr = nv50_fb_intr;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+static void
+nv50_fb_dtor(struct nouveau_object *object)
+{
+	struct nouveau_device *device = nv_device(object);
+	struct nv50_fb_priv *priv = (void *)object;
+
+	if (priv->r100c08_page) {
+		pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
+			       PCI_DMA_BIDIRECTIONAL);
+		__free_page(priv->r100c08_page);
+	}
+
+	nouveau_fb_destroy(&priv->base);
+}
+
+static int
+nv50_fb_init(struct nouveau_object *object)
+{
+	struct nouveau_device *device = nv_device(object);
+	struct nv50_fb_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fb_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* Not a clue what this is exactly.  Without pointing it at a
+	 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
+	 * cause IOMMU "read from address 0" errors (rh#561267)
+	 */
+	nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
+
+	/* This is needed to get meaningful information from 100c90
+	 * on traps. No idea what these values mean exactly. */
+	switch (device->chipset) {
+	case 0x50:
+		nv_wr32(priv, 0x100c90, 0x000707ff);
+		break;
+	case 0xa3:
+	case 0xa5:
+	case 0xa8:
+		nv_wr32(priv, 0x100c90, 0x000d0fff);
+		break;
+	case 0xaf:
+		nv_wr32(priv, 0x100c90, 0x089d1fff);
+		break;
+	default:
+		nv_wr32(priv, 0x100c90, 0x001d07ff);
+		break;
+	}
+
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_fb_ctor,
+		.dtor = nv50_fb_dtor,
+		.init = nv50_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
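
A user-space model of the nv50_fb_vram_rblock() arithmetic above may help: the partition count comes from set bits in 16..23 of 0x001540, the column and bank widths from fields of 0x100204, and the resulting row size is tripled when bit 0 of 0x100250 indicates reordering. All register values here are invented.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t r4 = 0x01a69000;	/* hypothetical 0x100204 value */
	uint32_t rt = 0x00000001;	/* hypothetical 0x100250 value */
	uint32_t ru = 0x000f0000;	/* hypothetical 0x001540 value */
	int i, parts = 0;
	int colbits, banks;
	uint64_t rowsize, rblock;

	for (i = 0; i < 8; i++) {
		if (ru & (0x00010000 << i))
			parts++;
	}

	colbits =  (r4 & 0x0000f000) >> 12;
	banks   = 1 << (((r4 & 0x03000000) >> 24) + 2);

	rowsize = (uint64_t)parts * banks * (1 << colbits) * 8;
	rblock  = (rt & 1) ? rowsize * 3 : rowsize;

	printf("parts %d colbits %d banks %d -> rblock %llu bytes\n",
	       parts, colbits, banks, (unsigned long long)rblock);
	return 0;
}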
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 9f59f2bf0079..306bdf121452 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -61,6 +61,65 @@ nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
 	return likely((types[memtype] == 1));
 }
 
+static int
+nvc0_fb_vram_init(struct nouveau_fb *pfb)
+{
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+	u32 parts = nv_rd32(pfb, 0x022438);
+	u32 pmask = nv_rd32(pfb, 0x022554);
+	u32 bsize = nv_rd32(pfb, 0x10f20c);
+	u32 offset, length;
+	bool uniform = true;
+	int ret, part;
+
+	nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800));
+	nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask);
+
+	pfb->ram.type = nouveau_fb_bios_memtype(bios);
+	pfb->ram.ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 2 : 1;
+
+	/* read amount of vram attached to each memory controller */
+	for (part = 0; part < parts; part++) {
+		if (!(pmask & (1 << part))) {
+			u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000));
+			if (psize != bsize) {
+				if (psize < bsize)
+					bsize = psize;
+				uniform = false;
+			}
+
+			nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize);
+			pfb->ram.size += (u64)psize << 20;
+		}
+	}
+
+	/* if all controllers have the same amount attached, there's no holes */
+	if (uniform) {
+		offset = rsvd_head;
+		length = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
+		return nouveau_mm_init(&pfb->vram, offset, length, 1);
+	}
+
+	/* otherwise, address lowest common amount from 0GiB */
+	ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
+	if (ret)
+		return ret;
+
+	/* and the rest starting from (8GiB + common_size) */
+	offset = (0x0200000000ULL >> 12) + (bsize << 8);
+	length = (pfb->ram.size >> 12) - (bsize << 8) - rsvd_tail;
+
+	ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
+	if (ret) {
+		nouveau_mm_fini(&pfb->vram);
+		return ret;
+	}
+
+	return 0;
+}
+
 static int
 nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
 		 u32 memtype, struct nouveau_mem **pmem)
@@ -138,66 +197,6 @@ nvc0_fb_dtor(struct nouveau_object *object)
 	nouveau_fb_destroy(&priv->base);
 }
 
-static int
-nvc0_vram_detect(struct nvc0_fb_priv *priv)
-{
-	struct nouveau_bios *bios = nouveau_bios(priv);
-	struct nouveau_fb *pfb = &priv->base;
-	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
-	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-	u32 parts = nv_rd32(priv, 0x022438);
-	u32 pmask = nv_rd32(priv, 0x022554);
-	u32 bsize = nv_rd32(priv, 0x10f20c);
-	u32 offset, length;
-	bool uniform = true;
-	int ret, part;
-
-	nv_debug(priv, "0x100800: 0x%08x\n", nv_rd32(priv, 0x100800));
-	nv_debug(priv, "parts 0x%08x mask 0x%08x\n", parts, pmask);
-
-	priv->base.ram.type = nouveau_fb_bios_memtype(bios);
-	priv->base.ram.ranks = (nv_rd32(priv, 0x10f200) & 0x00000004) ? 2 : 1;
-
-	/* read amount of vram attached to each memory controller */
-	for (part = 0; part < parts; part++) {
-		if (!(pmask & (1 << part))) {
-			u32 psize = nv_rd32(priv, 0x11020c + (part * 0x1000));
-			if (psize != bsize) {
-				if (psize < bsize)
-					bsize = psize;
-				uniform = false;
-			}
-
-			nv_debug(priv, "%d: mem_amount 0x%08x\n", part, psize);
-			priv->base.ram.size += (u64)psize << 20;
-		}
-	}
-
-	/* if all controllers have the same amount attached, there's no holes */
-	if (uniform) {
-		offset = rsvd_head;
-		length = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
-		return nouveau_mm_init(&pfb->vram, offset, length, 1);
-	}
-
-	/* otherwise, address lowest common amount from 0GiB */
-	ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
-	if (ret)
-		return ret;
-
-	/* and the rest starting from (8GiB + common_size) */
-	offset = (0x0200000000ULL >> 12) + (bsize << 8);
-	length = (priv->base.ram.size >> 12) - (bsize << 8) - rsvd_tail;
-
-	ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
-	if (ret) {
-		nouveau_mm_fini(&pfb->vram);
-		return ret;
-	}
-
-	return 0;
-}
-
 static int
 nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_oclass *oclass, void *data, u32 size,
@@ -213,13 +212,10 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	priv->base.memtype_valid = nvc0_fb_memtype_valid;
+	priv->base.ram.init = nvc0_fb_vram_init;
 	priv->base.ram.get = nvc0_fb_vram_new;
 	priv->base.ram.put = nv50_fb_vram_del;
 
-	ret = nvc0_vram_detect(priv);
-	if (ret)
-		return ret;
-
 	priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (!priv->r100c10_page)
 		return -ENOMEM;
@@ -229,7 +225,7 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (pci_dma_mapping_error(device->pdev, priv->r100c10))
 		return -EFAULT;
 
-	return nouveau_fb_created(&priv->base);
+	return nouveau_fb_preinit(&priv->base);
 }
 
 
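The uniform/non-uniform split in nvc0_fb_vram_init above can be pictured with a small stand-alone calculation (partition sizes invented; units are 4KiB pages, as in the diff): the lowest common per-partition amount is addressed from 0, and the remainder is placed from 8GiB plus the common size, mirroring the two nouveau_mm_init calls.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t rsvd_head = ( 256 * 1024) >> 12;	/* vga memory */
	const uint32_t rsvd_tail = (1024 * 1024) >> 12;	/* vbios etc */
	uint32_t bsize = 1024;		/* smallest partition, MiB (invented) */
	uint32_t parts = 2;		/* partition count (invented) */
	uint64_t total = (uint64_t)(1024 + 2048) << 20;	/* bytes */

	/* lowest common amount, addressed from 0 */
	uint32_t off1 = rsvd_head;
	uint32_t len1 = (bsize << 8) * parts;	/* MiB -> 4KiB pages */

	/* the remainder, addressed from 8GiB + common_size */
	uint64_t off2 = (0x0200000000ULL >> 12) + (bsize << 8);
	uint64_t len2 = (total >> 12) - (bsize << 8) - rsvd_tail;

	printf("region1: pages %u..%u\n", off1, off1 + len1);
	printf("region2: pages %llu..%llu\n", (unsigned long long)off2,
	       (unsigned long long)(off2 + len2));
	return 0;
}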
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
index fe1ebf199ba9..dc27e794a851 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
@@ -50,7 +50,7 @@ auxch_init(struct nouveau_i2c *aux, int ch)
 		ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
 		udelay(1);
 		if (!timeout--) {
-			AUX_ERR("begin idle timeout 0x%08x", ctrl);
+			AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
 			return -EBUSY;
 		}
 	} while (ctrl & 0x03010000);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
index ba4d28b50368..f5bbd3834116 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -63,14 +63,14 @@ nv04_instobj_dtor(struct nouveau_object *object)
 }
 
 static u32
-nv04_instobj_rd32(struct nouveau_object *object, u32 addr)
+nv04_instobj_rd32(struct nouveau_object *object, u64 addr)
 {
 	struct nv04_instobj_priv *node = (void *)object;
 	return nv_ro32(object->engine, node->mem->offset + addr);
 }
 
 static void
-nv04_instobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
 	struct nv04_instobj_priv *node = (void *)object;
 	nv_wo32(object->engine, node->mem->offset + addr, data);
@@ -173,13 +173,13 @@ nv04_instmem_dtor(struct nouveau_object *object)
 }
 
 static u32
-nv04_instmem_rd32(struct nouveau_object *object, u32 addr)
+nv04_instmem_rd32(struct nouveau_object *object, u64 addr)
 {
 	return nv_rd32(object, 0x700000 + addr);
 }
 
 static void
-nv04_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
 	return nv_wr32(object, 0x700000 + addr, data);
 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index 73c52ebd5932..da64253201ef 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -111,14 +111,14 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 }
 
 static u32
-nv40_instmem_rd32(struct nouveau_object *object, u32 addr)
+nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
 {
 	struct nv04_instmem_priv *priv = (void *)object;
 	return ioread32_native(priv->iomem + addr);
 }
 
 static void
-nv40_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
 	struct nv04_instmem_priv *priv = (void *)object;
 	iowrite32_native(data, priv->iomem + addr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
index 27ef0891d10b..cfc7e31461de 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
@@ -76,7 +76,7 @@ nv50_instobj_dtor(struct nouveau_object *object)
 }
 
 static u32
-nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
+nv50_instobj_rd32(struct nouveau_object *object, u64 offset)
 {
 	struct nv50_instmem_priv *priv = (void *)object->engine;
 	struct nv50_instobj_priv *node = (void *)object;
@@ -96,7 +96,7 @@ nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
 }
 
 static void
-nv50_instobj_wr32(struct nouveau_object *object, u32 offset, u32 data)
+nv50_instobj_wr32(struct nouveau_object *object, u64 offset, u32 data)
 {
 	struct nv50_instmem_priv *priv = (void *)object->engine;
 	struct nv50_instobj_priv *node = (void *)object;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index de5721cfc4c2..8379aafa6e1b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -30,20 +30,20 @@ nouveau_mc_intr(struct nouveau_subdev *subdev)
 	struct nouveau_mc *pmc = nouveau_mc(subdev);
 	const struct nouveau_mc_intr *map = pmc->intr_map;
 	struct nouveau_subdev *unit;
-	u32 stat;
+	u32 stat, intr;
 
-	stat = nv_rd32(pmc, 0x000100);
+	intr = stat = nv_rd32(pmc, 0x000100);
 	while (stat && map->stat) {
 		if (stat & map->stat) {
 			unit = nouveau_subdev(subdev, map->unit);
 			if (unit && unit->intr)
 				unit->intr(unit);
-			stat &= ~map->stat;
+			intr &= ~map->stat;
 		}
 		map++;
 	}
 
-	if (stat) {
+	if (intr) {
-		nv_error(pmc, "unknown intr 0x%08x\n", stat);
+		nv_error(pmc, "unknown intr 0x%08x\n", intr);
 	}
 }
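
A toy model of the stat/intr bookkeeping in nouveau_mc_intr above (handler table and status value invented): stat keeps the full readout so every matching unit is still dispatched, while intr drops each serviced bit so only genuinely unhandled sources survive to the final report.

#include <stdint.h>
#include <stdio.h>

struct intr_map { uint32_t stat; const char *unit; };

static const struct intr_map map[] = {
	{ 0x00000100, "FIFO" },
	{ 0x00001000, "GR"   },
	{ 0, NULL },
};

int main(void)
{
	uint32_t stat, intr;
	const struct intr_map *m = map;

	intr = stat = 0x00001180;	/* pretend 0x000100 read this */
	while (stat && m->stat) {
		if (stat & m->stat) {
			printf("dispatch to %s\n", m->unit);
			intr &= ~m->stat;	/* bit is now accounted for */
		}
		m++;
	}
	if (intr)
		printf("unknown intr 0x%08x\n", intr);	/* leftover: 0x80 */
	return 0;
}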
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index cedf33b02977..8d759f830323 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -39,6 +39,7 @@ nv50_mc_intr[] = {
 	{ 0x00200000, NVDEV_SUBDEV_GPIO },
 	{ 0x04000000, NVDEV_ENGINE_DISP },
 	{ 0x80000000, NVDEV_ENGINE_SW },
+	{ 0x0000d101, NVDEV_SUBDEV_FB },
 	{},
 };
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index a001e4c4d38d..ceb5c83f9459 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -40,6 +40,7 @@ nv98_mc_intr[] = {
 	{ 0x00400000, NVDEV_ENGINE_COPY0 },	/* NVA3-     */
 	{ 0x04000000, NVDEV_ENGINE_DISP },
 	{ 0x80000000, NVDEV_ENGINE_SW },
+	{ 0x0040d101, NVDEV_SUBDEV_FB },
 	{},
 };
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index c2b81e30a17d..92796682722d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -36,6 +36,7 @@ nvc0_mc_intr[] = {
 	{ 0x00000100, NVDEV_ENGINE_FIFO },
 	{ 0x00001000, NVDEV_ENGINE_GR },
 	{ 0x00008000, NVDEV_ENGINE_BSP },
+	{ 0x00020000, NVDEV_ENGINE_VP },
 	{ 0x00100000, NVDEV_SUBDEV_TIMER },
 	{ 0x00200000, NVDEV_SUBDEV_GPIO },
 	{ 0x02000000, NVDEV_SUBDEV_LTCG },
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index cbf1fc60a386..41241922263f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -246,14 +246,26 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 		return nouveau_abi16_put(abi16, -ENODEV);
 
 	client = nv_client(abi16->client);
-
-	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
-		return nouveau_abi16_put(abi16, -EINVAL);
-
 	device = nv_device(abi16->device);
 	imem   = nouveau_instmem(device);
 	pfb    = nouveau_fb(device);
 
+	/* hack to allow channel engine type specification on kepler */
+	if (device->card_type >= NV_E0) {
+		if (init->fb_ctxdma_handle != ~0)
+			init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
+		else
+			init->fb_ctxdma_handle = init->tt_ctxdma_handle;
+
+		/* allow flips to be executed if this is a graphics channel */
+		init->tt_ctxdma_handle = 0;
+		if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR)
+			init->tt_ctxdma_handle = 1;
+	}
+
+	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
+		return nouveau_abi16_put(abi16, -EINVAL);
+
 	/* allocate "abi16 channel" data and make up a handle for it */
 	init->channel = ffsll(~abi16->handles);
 	if (!init->channel--)
@@ -268,11 +280,6 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 	abi16->handles |= (1 << init->channel);
 
 	/* create channel object and initialise dma and fence management */
-	if (device->card_type >= NV_E0) {
-		init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
-		init->tt_ctxdma_handle = 0;
-	}
-
 	ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
 				  init->channel, init->fb_ctxdma_handle,
 				  init->tt_ctxdma_handle, &chan->chan);
@@ -382,7 +389,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
 	struct nouveau_abi16_chan *chan, *temp;
 	struct nouveau_abi16_ntfy *ntfy;
 	struct nouveau_object *object;
-	struct nv_dma_class args;
+	struct nv_dma_class args = {};
 	int ret;
 
 	if (unlikely(!abi16))
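
Viewed from user space, the Kepler channel-alloc hack above remaps the two ctxdma handles roughly as in this sketch; the value of NVE0_CHANNEL_IND_ENGINE_GR is assumed for the demo and the helper is hypothetical.

#include <stdint.h>
#include <stdio.h>

#define NVE0_CHANNEL_IND_ENGINE_GR 0x01	/* value assumed for this demo */

static void remap(uint32_t *fb, uint32_t *tt)
{
	if (*fb != ~0u)
		*fb = NVE0_CHANNEL_IND_ENGINE_GR;	/* default: GR */
	else
		*fb = *tt;		/* caller picked an engine via tt */

	/* tt becomes a flip-allowed flag, set only for graphics */
	*tt = (*fb == NVE0_CHANNEL_IND_ENGINE_GR);
}

int main(void)
{
	uint32_t fb = 0, tt = 0;

	remap(&fb, &tt);
	printf("engine 0x%x, flips %u\n", fb, tt);
	return 0;
}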
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 48783e14114c..d97f20069d3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -35,6 +35,16 @@ static struct nouveau_dsm_priv {
 	acpi_handle rom_handle;
 } nouveau_dsm_priv;
 
+bool nouveau_is_optimus(void)
+{
+	return nouveau_dsm_priv.optimus_detected;
+}
+
+bool nouveau_is_v1_dsm(void)
+{
+	return nouveau_dsm_priv.dsm_detected;
+}
+
 #define NOUVEAU_DSM_HAS_MUX 0x1
 #define NOUVEAU_DSM_HAS_OPT 0x2
 
@@ -183,9 +191,7 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
 
 static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
 {
-	/* perhaps the _DSM functions are mutually exclusive, but prepare for
-	 * the future */
-	if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
+	if (!nouveau_dsm_priv.dsm_detected)
 		return 0;
 	if (id == VGA_SWITCHEROO_IGD)
 		return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
@@ -201,7 +207,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
 
 	/* Optimus laptops have the card already disabled in
 	 * nouveau_switcheroo_set_state */
-	if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
+	if (!nouveau_dsm_priv.dsm_detected)
 		return 0;
 
 	return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
@@ -283,7 +289,15 @@ static bool nouveau_dsm_detect(void)
 			has_optimus = 1;
 	}
 
-	if (vga_count == 2 && has_dsm && guid_valid) {
+	/* find the optimus DSM or the old v1 DSM */
+	if (has_optimus == 1) {
+		acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
+			&buffer);
+		printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
+			acpi_method_name);
+		nouveau_dsm_priv.optimus_detected = true;
+		ret = true;
+	} else if (vga_count == 2 && has_dsm && guid_valid) {
 		acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
 			&buffer);
 		printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
@@ -292,14 +306,6 @@ static bool nouveau_dsm_detect(void)
 		ret = true;
 	}
 
-	if (has_optimus == 1) {
-		acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
-			&buffer);
-		printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
-			acpi_method_name);
-		nouveau_dsm_priv.optimus_detected = true;
-		ret = true;
-	}
 
 	return ret;
 }
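
The reordered detection above reduces to a simple priority: the Optimus _DSM, when found, now wins outright instead of being latched alongside the older v1 switching _DSM. A condensed model (helper name and arguments invented):

#include <stdbool.h>
#include <stdio.h>

static const char *detect(bool has_optimus, bool has_dsm, int vga_count,
			  bool guid_valid)
{
	if (has_optimus)
		return "optimus";	/* takes priority when present */
	if (vga_count == 2 && has_dsm && guid_valid)
		return "v1 dsm";
	return "none";
}

int main(void)
{
	printf("%s\n", detect(true, true, 2, true));	/* optimus wins */
	printf("%s\n", detect(false, true, 2, true));	/* falls back   */
	return 0;
}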
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index 08af67722b57..d0da230d7706 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -4,6 +4,8 @@
 #define ROM_BIOS_PAGE 4096
 
 #if defined(CONFIG_ACPI)
+bool nouveau_is_optimus(void);
+bool nouveau_is_v1_dsm(void);
 void nouveau_register_dsm_handler(void);
 void nouveau_unregister_dsm_handler(void);
 void nouveau_switcheroo_optimus_dsm(void);
@@ -11,6 +13,8 @@ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
 bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
 void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
 #else
+static inline bool nouveau_is_optimus(void) { return false; }
+static inline bool nouveau_is_v1_dsm(void) { return false; }
 static inline void nouveau_register_dsm_handler(void) {}
 static inline void nouveau_unregister_dsm_handler(void) {}
 static inline void nouveau_switcheroo_optimus_dsm(void) {}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 09fdef235882..865eddfa30a7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -624,206 +624,6 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
 	return 0;
 }
 
-/* BIT 'U'/'d' table encoder subtables have hashes matching them to
- * a particular set of encoders.
- *
- * This function returns true if a particular DCB entry matches.
- */
-bool
-bios_encoder_match(struct dcb_output *dcb, u32 hash)
-{
-	if ((hash & 0x000000f0) != (dcb->location << 4))
-		return false;
-	if ((hash & 0x0000000f) != dcb->type)
-		return false;
-	if (!(hash & (dcb->or << 16)))
-		return false;
-
-	switch (dcb->type) {
-	case DCB_OUTPUT_TMDS:
-	case DCB_OUTPUT_LVDS:
-	case DCB_OUTPUT_DP:
-		if (hash & 0x00c00000) {
-			if (!(hash & (dcb->sorconf.link << 22)))
-				return false;
-		}
-	default:
-		return true;
-	}
-}
-
-int
-nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
-			       struct dcb_output *dcbent, int crtc)
-{
-	/*
-	 * The display script table is located by the BIT 'U' table.
-	 *
-	 * It contains an array of pointers to various tables describing
-	 * a particular output type.  The first 32-bits of the output
-	 * tables contains similar information to a DCB entry, and is
-	 * used to decide whether that particular table is suitable for
-	 * the output you want to access.
-	 *
-	 * The "record header length" field here seems to indicate the
-	 * offset of the first configuration entry in the output tables.
-	 * This is 10 on most cards I've seen, but 12 has been witnessed
-	 * on DP cards, and there's another script pointer within the
-	 * header.
-	 *
-	 * offset + 0   ( 8 bits): version
-	 * offset + 1   ( 8 bits): header length
-	 * offset + 2   ( 8 bits): record length
-	 * offset + 3   ( 8 bits): number of records
-	 * offset + 4   ( 8 bits): record header length
-	 * offset + 5   (16 bits): pointer to first output script table
-	 */
-
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvbios *bios = &drm->vbios;
-	uint8_t *table = &bios->data[bios->display.script_table_ptr];
-	uint8_t *otable = NULL;
-	uint16_t script;
-	int i;
-
-	if (!bios->display.script_table_ptr) {
-		NV_ERROR(drm, "No pointer to output script table\n");
-		return 1;
-	}
-
-	/*
-	 * Nothing useful has been in any of the pre-2.0 tables I've seen,
-	 * so until they are, we really don't need to care.
-	 */
-	if (table[0] < 0x20)
-		return 1;
-
-	if (table[0] != 0x20 && table[0] != 0x21) {
-		NV_ERROR(drm, "Output script table version 0x%02x unknown\n",
-			 table[0]);
-		return 1;
-	}
-
-	/*
-	 * The output script tables describing a particular output type
-	 * look as follows:
-	 *
-	 * offset + 0   (32 bits): output this table matches (hash of DCB)
-	 * offset + 4   ( 8 bits): unknown
-	 * offset + 5   ( 8 bits): number of configurations
-	 * offset + 6   (16 bits): pointer to some script
-	 * offset + 8   (16 bits): pointer to some script
-	 *
-	 * headerlen == 10
-	 * offset + 10           : configuration 0
-	 *
-	 * headerlen == 12
-	 * offset + 10           : pointer to some script
-	 * offset + 12           : configuration 0
-	 *
-	 * Each config entry is as follows:
-	 *
-	 * offset + 0   (16 bits): unknown, assumed to be a match value
-	 * offset + 2   (16 bits): pointer to script table (clock set?)
-	 * offset + 4   (16 bits): pointer to script table (reset?)
-	 *
-	 * There doesn't appear to be a count value to say how many
-	 * entries exist in each script table, instead, a 0 value in
-	 * the first 16-bit word seems to indicate both the end of the
-	 * list and the default entry.  The second 16-bit word in the
-	 * script tables is a pointer to the script to execute.
-	 */
-
-	NV_DEBUG(drm, "Searching for output entry for %d %d %d\n",
-			dcbent->type, dcbent->location, dcbent->or);
-	for (i = 0; i < table[3]; i++) {
-		otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
-		if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
-			break;
-	}
-
-	if (!otable) {
-		NV_DEBUG(drm, "failed to match any output table\n");
-		return 1;
-	}
-
-	if (pclk < -2 || pclk > 0) {
-		/* Try to find matching script table entry */
-		for (i = 0; i < otable[5]; i++) {
-			if (ROM16(otable[table[4] + i*6]) == type)
-				break;
-		}
-
-		if (i == otable[5]) {
-			NV_ERROR(drm, "Table 0x%04x not found for %d/%d, "
-				      "using first\n",
-				 type, dcbent->type, dcbent->or);
-			i = 0;
-		}
-	}
-
-	if (pclk == 0) {
-		script = ROM16(otable[6]);
-		if (!script) {
-			NV_DEBUG(drm, "output script 0 not found\n");
-			return 1;
-		}
-
-		NV_DEBUG(drm, "0x%04X: parsing output script 0\n", script);
-		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
-	} else
-	if (pclk == -1) {
-		script = ROM16(otable[8]);
-		if (!script) {
-			NV_DEBUG(drm, "output script 1 not found\n");
-			return 1;
-		}
-
-		NV_DEBUG(drm, "0x%04X: parsing output script 1\n", script);
-		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
-	} else
-	if (pclk == -2) {
-		if (table[4] >= 12)
-			script = ROM16(otable[10]);
-		else
-			script = 0;
-		if (!script) {
-			NV_DEBUG(drm, "output script 2 not found\n");
-			return 1;
-		}
-
-		NV_DEBUG(drm, "0x%04X: parsing output script 2\n", script);
-		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
-	} else
-	if (pclk > 0) {
-		script = ROM16(otable[table[4] + i*6 + 2]);
-		if (script)
-			script = clkcmptable(bios, script, pclk);
-		if (!script) {
-			NV_DEBUG(drm, "clock script 0 not found\n");
-			return 1;
-		}
-
-		NV_DEBUG(drm, "0x%04X: parsing clock script 0\n", script);
-		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
-	} else
-	if (pclk < 0) {
-		script = ROM16(otable[table[4] + i*6 + 4]);
-		if (script)
-			script = clkcmptable(bios, script, -pclk);
-		if (!script) {
-			NV_DEBUG(drm, "clock script 1 not found\n");
-			return 1;
-		}
-
-		NV_DEBUG(drm, "0x%04X: parsing clock script 1\n", script);
-		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
-	}
-
-	return 0;
-}
-
-
 int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk)
 {
 	/*
@@ -1212,31 +1012,6 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
 	return 0;
 }
 
-static int
-parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
-		      struct bit_entry *bitentry)
-{
-	/*
-	 * Parses the pointer to the G80 output script tables
-	 *
-	 * Starting at bitentry->offset:
-	 *
-	 * offset + 0  (16 bits): output script table pointer
-	 */
-
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	uint16_t outputscripttableptr;
-
-	if (bitentry->length != 3) {
-		NV_ERROR(drm, "Do not understand BIT U table\n");
-		return -EINVAL;
-	}
-
-	outputscripttableptr = ROM16(bios->data[bitentry->offset]);
-	bios->display.script_table_ptr = outputscripttableptr;
-	return 0;
-}
-
 struct bit_table {
 	const char id;
 	int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
@@ -1313,7 +1088,6 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
 	parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */
 	parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
 	parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
-	parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
 
 	return 0;
 }
@@ -2324,7 +2098,7 @@ nouveau_run_vbios_init(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvbios *bios = &drm->vbios;
-	int i, ret = 0;
+	int ret = 0;
 
 	/* Reset the BIOS head to 0. */
 	bios->state.crtchead = 0;
@@ -2337,13 +2111,6 @@ nouveau_run_vbios_init(struct drm_device *dev)
 		bios->fp.lvds_init_run = false;
 	}
 
-	if (nv_device(drm->device)->card_type >= NV_50) {
-		for (i = 0; bios->execute && i < bios->dcb.entries; i++) {
-			nouveau_bios_run_display_table(dev, 0, 0,
-						       &bios->dcb.entry[i], -1);
-		}
-	}
-
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 3befbb821a56..f68c54ca422f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -127,12 +127,6 @@ struct nvbios {
 		int crtchead;
 	} state;
 
-	struct {
-		struct dcb_output *output;
-		int crtc;
-		uint16_t script_table_ptr;
-	} display;
-
 	struct {
 		uint16_t fptablepointer;	/* also used by tmds */
 		uint16_t fpxlatetableptr;
@@ -185,8 +179,6 @@ void nouveau_bios_takedown(struct drm_device *dev);
 int nouveau_run_vbios_init(struct drm_device *);
 struct dcb_connector_table_entry *
 nouveau_bios_connector_entry(struct drm_device *, int index);
-int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk,
-					  struct dcb_output *, int crtc);
 bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
 uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
 int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
@@ -195,6 +187,5 @@ int run_tmds_table(struct drm_device *, struct dcb_output *,
 			  int head, int pxclk);
 int call_lvds_script(struct drm_device *, struct dcb_output *, int head,
 			    enum LVDS_script, int pxclk);
-bool bios_encoder_match(struct dcb_output *, u32 hash);
 
 #endif
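
For readers tracking what the removed code did: the BIT 'U' encoder hash matched by the deleted bios_encoder_match packs the DCB type into bits 0-3, the location into bits 4-7, an OR mask from bit 16 up, and a link mask in bits 22-23 for TMDS/LVDS/DP outputs. A decode sketch with an invented hash value and an assumed OR-field width:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hash = 0x00410006;	/* invented BIT 'U' table entry */

	printf("type      %u\n",  hash & 0x0000000f);
	printf("location  %u\n", (hash & 0x000000f0) >> 4);
	printf("or mask   0x%x\n", (hash >> 16) & 0x3f);	/* width assumed */
	printf("link mask 0x%x\n", (hash & 0x00c00000) >> 22);
	return 0;
}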
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 35ac57f0aab6..5614c89148cb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -225,7 +225,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 
 	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
 			  type, &nvbo->placement,
-			  align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
+			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
 			  nouveau_bo_del_ttm);
 	if (ret) {
 		/* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -315,7 +315,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
 
 	nouveau_bo_placement_set(nvbo, memtype, 0);
 
-	ret = nouveau_bo_validate(nvbo, false, false, false);
+	ret = nouveau_bo_validate(nvbo, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
@@ -351,7 +351,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 
 	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
 
-	ret = nouveau_bo_validate(nvbo, false, false, false);
+	ret = nouveau_bo_validate(nvbo, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
@@ -392,12 +392,12 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
 
 int
 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
-		    bool no_wait_reserve, bool no_wait_gpu)
+		    bool no_wait_gpu)
 {
 	int ret;
 
-	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
-			      no_wait_reserve, no_wait_gpu);
+	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
+			      interruptible, no_wait_gpu);
 	if (ret)
 		return ret;
 
@@ -556,8 +556,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 static int
 nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
 			      struct nouveau_bo *nvbo, bool evict,
-			      bool no_wait_reserve, bool no_wait_gpu,
-			      struct ttm_mem_reg *new_mem)
+			      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_fence *fence = NULL;
 	int ret;
@@ -566,8 +565,8 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
-					no_wait_reserve, no_wait_gpu, new_mem);
+	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
+					no_wait_gpu, new_mem);
 	nouveau_fence_unref(&fence);
 	return ret;
 }
@@ -965,8 +964,7 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
 
 static int
 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-		     bool no_wait_reserve, bool no_wait_gpu,
-		     struct ttm_mem_reg *new_mem)
+		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
-	struct nouveau_channel *chan = chan = drm->channel;
+	struct nouveau_channel *chan = drm->channel;
@@ -995,7 +993,6 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
 	if (ret == 0) {
 		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
-						    no_wait_reserve,
 						    no_wait_gpu, new_mem);
 	}
 
@@ -1064,8 +1061,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 
 static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait_reserve, bool no_wait_gpu,
-		      struct ttm_mem_reg *new_mem)
+		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	struct ttm_placement placement;
@@ -1078,7 +1074,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
 	if (ret)
 		return ret;
 
@@ -1086,11 +1082,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		goto out;
 
-	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
+	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
 	if (ret)
 		goto out;
 
-	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
+	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
 out:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return ret;
@@ -1098,8 +1094,7 @@ out:
 
 static int
 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait_reserve, bool no_wait_gpu,
-		      struct ttm_mem_reg *new_mem)
+		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	struct ttm_placement placement;
@@ -1112,15 +1107,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
+	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
 	if (ret)
 		goto out;
 
-	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
+	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
 	if (ret)
 		goto out;
 
@@ -1195,8 +1190,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 
 static int
 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
-		bool no_wait_reserve, bool no_wait_gpu,
-		struct ttm_mem_reg *new_mem)
+		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -1220,23 +1214,26 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 
 	/* CPU copy if we have no accelerated method available */
 	if (!drm->ttm.move) {
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 		goto out;
 	}
 
 	/* Hardware assisted copy. */
 	if (new_mem->mem_type == TTM_PL_SYSTEM)
-		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+		ret = nouveau_bo_move_flipd(bo, evict, intr,
+					    no_wait_gpu, new_mem);
 	else if (old_mem->mem_type == TTM_PL_SYSTEM)
-		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+		ret = nouveau_bo_move_flips(bo, evict, intr,
+					    no_wait_gpu, new_mem);
 	else
-		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+		ret = nouveau_bo_move_m2mf(bo, evict, intr,
+					   no_wait_gpu, new_mem);
 
 	if (!ret)
 		goto out;
 
 	/* Fallback to software copy. */
-	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+	ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 
 out:
 	if (nv_device(drm->device)->card_type < NV_50) {
@@ -1343,7 +1340,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	nvbo->placement.fpfn = 0;
 	nvbo->placement.lpfn = mappable;
 	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
-	return nouveau_bo_validate(nvbo, false, true, false);
+	return nouveau_bo_validate(nvbo, false, false);
 }
 
 static int
@@ -1472,19 +1469,19 @@ nouveau_bo_fence_ref(void *sync_obj)
 }
 
 static bool
-nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
+nouveau_bo_fence_signalled(void *sync_obj)
 {
 	return nouveau_fence_done(sync_obj);
 }
 
 static int
-nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
 {
 	return nouveau_fence_wait(sync_obj, lazy, intr);
 }
 
 static int
-nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
+nouveau_bo_fence_flush(void *sync_obj)
 {
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index dec51b1098fe..25ca37989d2c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -76,7 +76,7 @@ u32  nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
 void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
 void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
 int  nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
-			 bool no_wait_reserve, bool no_wait_gpu);
+			 bool no_wait_gpu);
 
 struct nouveau_vma *
 nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index c1d7301c0e9c..174300b6a02e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -76,6 +76,8 @@ nouveau_channel_del(struct nouveau_channel **pchan)
 		nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
 		nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
 		nouveau_bo_unmap(chan->push.buffer);
+		if (chan->push.buffer && chan->push.buffer->pin_refcnt)
+			nouveau_bo_unpin(chan->push.buffer);
 		nouveau_bo_ref(NULL, &chan->push.buffer);
 		kfree(chan);
 	}
@@ -267,7 +269,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 	struct nouveau_fb *pfb = nouveau_fb(device);
 	struct nouveau_software_chan *swch;
 	struct nouveau_object *object;
-	struct nv_dma_class args;
+	struct nv_dma_class args = {};
 	int ret, i;
 
 	/* allocate dma objects to cover all allowed vram, and gart */
@@ -346,7 +348,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 	/* allocate software object class (used for fences on <= nv05, and
 	 * to signal flip completion), bind it to a subchannel.
 	 */
-	if (chan != chan->drm->cechan) {
+	if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) {
 		ret = nouveau_object_new(nv_object(client), chan->handle,
 					 NvSw, nouveau_abi16_swclass(chan->drm),
 					 NULL, 0, &object);
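
The nouveau_channel_del change above pairs the unpin with the pin taken when the push buffer was created, guarded by the refcount so a never-pinned buffer is not underflowed. A toy illustration (not TTM code):

#include <stdio.h>

struct bo { int pin_refcnt; };

static void bo_unpin(struct bo *bo)
{
	bo->pin_refcnt--;
}

static void channel_del(struct bo *push)
{
	if (push && push->pin_refcnt)
		bo_unpin(push);	/* balance the pin taken at alloc */
	printf("pin_refcnt now %d\n", push ? push->pin_refcnt : 0);
}

int main(void)
{
	struct bo push = { .pin_refcnt = 1 };

	channel_del(&push);	/* pinned case: drops to 0 */
	channel_del(&push);	/* already unpinned: no underflow */
	return 0;
}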
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index d3595b23434a..ac340ba32017 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -110,7 +110,6 @@ nouveau_connector_destroy(struct drm_connector *connector)
 	dev  = nv_connector->base.dev;
 	drm  = nouveau_drm(dev);
 	gpio = nouveau_gpio(drm->device);
-	NV_DEBUG(drm, "\n");
 
 	if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
 		gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff,
@@ -221,7 +220,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
 	}
 
 	if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
-		drm_connector_property_set_value(connector,
+		drm_object_property_set_value(&connector->base,
 			dev->mode_config.dvi_i_subconnector_property,
 			nv_encoder->dcb->type == DCB_OUTPUT_TMDS ?
 			DRM_MODE_SUBCONNECTOR_DVID :
@@ -929,8 +928,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
 	int type, ret = 0;
 	bool dummy;
 
-	NV_DEBUG(drm, "\n");
-
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		nv_connector = nouveau_connector(connector);
 		if (nv_connector->index == index)
@@ -1043,7 +1040,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
 
 	/* Init DVI-I specific properties */
 	if (nv_connector->type == DCB_CONNECTOR_DVI_I)
-		drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
+		drm_object_attach_property(&connector->base, dev->mode_config.dvi_i_subconnector_property, 0);
 
 	/* Add overscan compensation options to digital outputs */
 	if (disp->underscan_property &&
@@ -1051,31 +1048,31 @@ nouveau_connector_create(struct drm_device *dev, int index)
 	     type == DRM_MODE_CONNECTOR_DVII ||
 	     type == DRM_MODE_CONNECTOR_HDMIA ||
 	     type == DRM_MODE_CONNECTOR_DisplayPort)) {
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      disp->underscan_property,
 					      UNDERSCAN_OFF);
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      disp->underscan_hborder_property,
 					      0);
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      disp->underscan_vborder_property,
 					      0);
 	}
 
 	/* Add hue and saturation options */
 	if (disp->vibrant_hue_property)
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      disp->vibrant_hue_property,
 					      90);
 	if (disp->color_vibrance_property)
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 					      disp->color_vibrance_property,
 					      150);
 
 	switch (nv_connector->type) {
 	case DCB_CONNECTOR_VGA:
 		if (nv_device(drm->device)->card_type >= NV_50) {
-			drm_connector_attach_property(connector,
+			drm_object_attach_property(&connector->base,
 					dev->mode_config.scaling_mode_property,
 					nv_connector->scaling_mode);
 		}
@@ -1088,18 +1085,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
 	default:
 		nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
 
-		drm_connector_attach_property(connector,
+		drm_object_attach_property(&connector->base,
 				dev->mode_config.scaling_mode_property,
 				nv_connector->scaling_mode);
 		if (disp->dithering_mode) {
 			nv_connector->dithering_mode = DITHERING_MODE_AUTO;
-			drm_connector_attach_property(connector,
+			drm_object_attach_property(&connector->base,
 						disp->dithering_mode,
 						nv_connector->dithering_mode);
 		}
 		if (disp->dithering_depth) {
 			nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
-			drm_connector_attach_property(connector,
+			drm_object_attach_property(&connector->base,
 						disp->dithering_depth,
 						nv_connector->dithering_depth);
 		}
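
The property churn through this file is purely mechanical:
drm_connector_attach_property() and drm_connector_property_set_value() were
replaced by object-generic helpers that operate on the embedded
struct drm_mode_object, so every call site swaps connector for
&connector->base. In sketch form:

	/* old, connector-specific helper */
	drm_connector_attach_property(connector, prop, init_val);

	/* new, works on any mode object that carries properties */
	drm_object_attach_property(&connector->base, prop, init_val);
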
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index ebdb87670a8f..20eb84cce9e6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -28,6 +28,7 @@
 #define __NOUVEAU_CONNECTOR_H__
 
 #include <drm/drm_edid.h>
+#include "nouveau_crtc.h"
 
 struct nouveau_i2c_port;
 
@@ -80,6 +81,21 @@ static inline struct nouveau_connector *nouveau_connector(
 	return container_of(con, struct nouveau_connector, base);
 }
 
+static inline struct nouveau_connector *
+nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
+{
+	struct drm_device *dev = nv_crtc->base.dev;
+	struct drm_connector *connector;
+	struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->encoder && connector->encoder->crtc == crtc)
+			return nouveau_connector(connector);
+	}
+
+	return NULL;
+}
+
 struct drm_connector *
 nouveau_connector_create(struct drm_device *, int index);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index e6d0d1eb0133..d1e5890784d7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -82,16 +82,6 @@ static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
 	return &crtc->base;
 }
 
-int nv50_crtc_create(struct drm_device *dev, int index);
-int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
-			 uint32_t buffer_handle, uint32_t width,
-			 uint32_t height);
-int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
-
 int nv04_cursor_init(struct nouveau_crtc *);
-int nv50_cursor_init(struct nouveau_crtc *);
-
-struct nouveau_connector *
-nouveau_crtc_connector_get(struct nouveau_crtc *crtc);
 
 #endif /* __NOUVEAU_CRTC_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 86124b131f4f..e4188f24fc75 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -98,12 +98,12 @@ nouveau_framebuffer_init(struct drm_device *dev,
 			nv_fb->r_dma = NvEvoVRAM_LP;
 
 		switch (fb->depth) {
-		case  8: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_8; break;
-		case 15: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_15; break;
-		case 16: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_16; break;
+		case  8: nv_fb->r_format = 0x1e00; break;
+		case 15: nv_fb->r_format = 0xe900; break;
+		case 16: nv_fb->r_format = 0xe800; break;
 		case 24:
-		case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break;
-		case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break;
+		case 32: nv_fb->r_format = 0xcf00; break;
+		case 30: nv_fb->r_format = 0xd100; break;
 		default:
 			 NV_ERROR(drm, "unknown depth %d\n", fb->depth);
 			 return -EINVAL;
@@ -324,7 +324,7 @@ nouveau_display_create(struct drm_device *dev)
 	disp->underscan_vborder_property =
 		drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
 
-	if (gen == 1) {
+	if (gen >= 1) {
 		disp->vibrant_hue_property =
 			drm_property_create(dev, DRM_MODE_PROP_RANGE,
 					    "vibrant hue", 2);
@@ -366,10 +366,7 @@ nouveau_display_create(struct drm_device *dev)
 		if (nv_device(drm->device)->card_type < NV_50)
 			ret = nv04_display_create(dev);
 		else
-		if (nv_device(drm->device)->card_type < NV_D0)
 			ret = nv50_display_create(dev);
-		else
-			ret = nvd0_display_create(dev);
 		if (ret)
 			goto disp_create_err;
 
@@ -400,11 +397,12 @@ nouveau_display_destroy(struct drm_device *dev)
 	nouveau_backlight_exit(dev);
 	drm_vblank_cleanup(dev);
 
+	drm_kms_helper_poll_fini(dev);
+	drm_mode_config_cleanup(dev);
+
 	if (disp->dtor)
 		disp->dtor(dev);
 
-	drm_kms_helper_poll_fini(dev);
-	drm_mode_config_cleanup(dev);
 	nouveau_drm(dev)->display = NULL;
 	kfree(disp);
 }
@@ -659,10 +657,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
 	/* Emit a page flip */
 	if (nv_device(drm->device)->card_type >= NV_50) {
-		if (nv_device(drm->device)->card_type >= NV_D0)
-			ret = nvd0_display_flip_next(crtc, fb, chan, 0);
-		else
-			ret = nv50_display_flip_next(crtc, fb, chan);
+		ret = nv50_display_flip_next(crtc, fb, chan, 0);
 		if (ret) {
 			mutex_unlock(&chan->cli->mutex);
 			goto fail_unreserve;
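
If the bare hex in the r_format switch looks alarming: the old
NV50_EVO_CRTC_FB_DEPTH_* names go away with the EVO headers in this rework,
and the hunk simply inlines their values, so the depth-to-format mapping is
unchanged:

	/* fb->depth   EVO surface format   (was)
	 *     8           0x1e00           NV50_EVO_CRTC_FB_DEPTH_8
	 *    15           0xe900           NV50_EVO_CRTC_FB_DEPTH_15
	 *    16           0xe800           NV50_EVO_CRTC_FB_DEPTH_16
	 * 24/32           0xcf00           NV50_EVO_CRTC_FB_DEPTH_24
	 *    30           0xd100           NV50_EVO_CRTC_FB_DEPTH_30
	 */
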
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 978a108ba7a1..59838651ee8f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -30,60 +30,17 @@
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
 
+#include <core/class.h>
+
 #include <subdev/gpio.h>
 #include <subdev/i2c.h>
 
-u8 *
-nouveau_dp_bios_data(struct drm_device *dev, struct dcb_output *dcb, u8 **entry)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct bit_entry d;
-	u8 *table;
-	int i;
-
-	if (bit_table(dev, 'd', &d)) {
-		NV_ERROR(drm, "BIT 'd' table not found\n");
-		return NULL;
-	}
-
-	if (d.version != 1) {
-		NV_ERROR(drm, "BIT 'd' table version %d unknown\n", d.version);
-		return NULL;
-	}
-
-	table = ROMPTR(dev, d.data[0]);
-	if (!table) {
-		NV_ERROR(drm, "displayport table pointer invalid\n");
-		return NULL;
-	}
-
-	switch (table[0]) {
-	case 0x20:
-	case 0x21:
-	case 0x30:
-	case 0x40:
-		break;
-	default:
-		NV_ERROR(drm, "displayport table 0x%02x unknown\n", table[0]);
-		return NULL;
-	}
-
-	for (i = 0; i < table[3]; i++) {
-		*entry = ROMPTR(dev, table[table[1] + (i * table[2])]);
-		if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0])))
-			return table;
-	}
-
-	NV_ERROR(drm, "displayport encoder table not found\n");
-	return NULL;
-}
-
 /******************************************************************************
  * link training
  *****************************************************************************/
 struct dp_state {
 	struct nouveau_i2c_port *auxch;
-	struct dp_train_func *func;
+	struct nouveau_object *core;
 	struct dcb_output *dcb;
 	int crtc;
 	u8 *dpcd;
@@ -97,13 +54,20 @@ static void
 dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct dcb_output *dcb = dp->dcb;
+	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+	const u32 moff = (dp->crtc << 3) | (link << 2) | or;
 	u8 sink[2];
+	u32 data;
 
 	NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
 
 	/* set desired link configuration on the source */
-	dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw,
-			   dp->dpcd[2] & DP_ENHANCED_FRAME_CAP);
+	data = ((dp->link_bw / 27000) << 8) | dp->link_nr;
+	if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
+		data |= NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH;
+
+	nv_call(dp->core, NV94_DISP_SOR_DP_LNKCTL + moff, data);
 
 	/* inform the sink of the new configuration */
 	sink[0] = dp->link_bw / 27000;
@@ -118,11 +82,14 @@ static void
 dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct dcb_output *dcb = dp->dcb;
+	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+	const u32 moff = (dp->crtc << 3) | (link << 2) | or;
 	u8 sink_tp;
 
 	NV_DEBUG(drm, "training pattern %d\n", pattern);
 
-	dp->func->train_set(dev, dp->dcb, pattern);
+	nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, pattern);
 
 	nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
 	sink_tp &= ~DP_TRAINING_PATTERN_MASK;
@@ -134,6 +101,9 @@ static int
 dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct dcb_output *dcb = dp->dcb;
+	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+	const u32 moff = (dp->crtc << 3) | (link << 2) | or;
 	int i;
 
 	for (i = 0; i < dp->link_nr; i++) {
@@ -148,7 +118,8 @@ dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
 			dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
 		NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]);
-		dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre);
+
+		nv_call(dp->core, NV94_DISP_SOR_DP_DRVCTL(i) + moff, (lvsw << 8) | lpre);
 	}
 
 	return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4);
@@ -234,59 +205,32 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
 }
 
 static void
-dp_set_downspread(struct drm_device *dev, struct dp_state *dp, bool enable)
+dp_link_train_init(struct drm_device *dev, struct dp_state *dp, bool spread)
 {
-	u16 script = 0x0000;
-	u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
-	if (table) {
-		if (table[0] >= 0x20 && table[0] <= 0x30) {
-			if (enable) script = ROM16(entry[12]);
-			else        script = ROM16(entry[14]);
-		} else
-		if (table[0] == 0x40) {
-			if (enable) script = ROM16(entry[11]);
-			else        script = ROM16(entry[13]);
-		}
-	}
+	struct dcb_output *dcb = dp->dcb;
+	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+	const u32 moff = (dp->crtc << 3) | (link << 2) | or;
 
-	nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
-}
-
-static void
-dp_link_train_init(struct drm_device *dev, struct dp_state *dp)
-{
-	u16 script = 0x0000;
-	u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
-	if (table) {
-		if (table[0] >= 0x20 && table[0] <= 0x30)
-			script = ROM16(entry[6]);
-		else
-		if (table[0] == 0x40)
-			script = ROM16(entry[5]);
-	}
-
-	nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
+	nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, (spread ?
+			  NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON :
+			  NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF) |
+			  NV94_DISP_SOR_DP_TRAIN_OP_INIT);
 }
 
 static void
 dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
 {
-	u16 script = 0x0000;
-	u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
-	if (table) {
-		if (table[0] >= 0x20 && table[0] <= 0x30)
-			script = ROM16(entry[8]);
-		else
-		if (table[0] == 0x40)
-			script = ROM16(entry[7]);
-	}
+	struct dcb_output *dcb = dp->dcb;
+	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+	const u32 moff = (dp->crtc << 3) | (link << 2) | or;
 
-	nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
+	nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff,
+			  NV94_DISP_SOR_DP_TRAIN_OP_FINI);
 }
 
 static bool
 nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
-		      struct dp_train_func *func)
+		      struct nouveau_object *core)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -304,7 +248,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
 	if (!dp.auxch)
 		return false;
 
-	dp.func = func;
+	dp.core = core;
 	dp.dcb = nv_encoder->dcb;
 	dp.crtc = nv_crtc->index;
 	dp.dpcd = nv_encoder->dp.dpcd;
@@ -318,11 +262,8 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
 	 */
 	gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false);
 
-	/* enable down-spreading, if possible */
-	dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
-
-	/* execute pre-train script from vbios */
-	dp_link_train_init(dev, &dp);
+	/* enable down-spreading and execute pre-train script from vbios */
+	dp_link_train_init(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
 
 	/* start off at highest link rate supported by encoder and display */
 	while (*link_bw > nv_encoder->dp.link_bw)
@@ -365,7 +306,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
 
 void
 nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
-		struct dp_train_func *func)
+		struct nouveau_object *core)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
@@ -385,7 +326,7 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
 	nv_wraux(auxch, DP_SET_POWER, &status, 1);
 
 	if (mode == DRM_MODE_DPMS_ON)
-		nouveau_dp_link_train(encoder, datarate, func);
+		nouveau_dp_link_train(encoder, datarate, core);
 }
 
 static void
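
The structural change in this file: link training no longer dispatches through
a per-generation dp_train_func table, it issues method calls on the core
display object, and the per-OR registers are addressed by a method offset
packed from head, sublink and output resource index. Annotated, the recurring
computation is:

	const u32 or   = ffs(dcb->or) - 1;          /* OR index, 0-based */
	const u32 link = !(dcb->sorconf.link & 1);  /* sublink A or B    */
	const u32 moff = (dp->crtc << 3) | (link << 2) | or;

	/* e.g. set the training pattern on exactly that SOR/link/head */
	nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, pattern);
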
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8503b2ea570a..01c403ddb99b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -49,8 +49,6 @@
 #include "nouveau_fbcon.h"
 #include "nouveau_fence.h"
 
-#include "nouveau_ttm.h"
-
 MODULE_PARM_DESC(config, "option string to pass to driver core");
 static char *nouveau_config;
 module_param_named(config, nouveau_config, charp, 0400);
@@ -149,7 +147,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
 			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
 
 		arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
-		arg1 = 0;
+		arg1 = 1;
 	} else {
 		arg0 = NvDmaFB;
 		arg1 = NvDmaTT;
@@ -224,6 +222,7 @@ nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
 	boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 #endif
 	remove_conflicting_framebuffers(aper, "nouveaufb", boot);
+	kfree(aper);
 
 	ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev),
 				    nouveau_config, nouveau_debug, &device);
@@ -395,17 +394,12 @@ nouveau_drm_remove(struct pci_dev *pdev)
 }
 
 int
-nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
+nouveau_do_suspend(struct drm_device *dev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_cli *cli;
 	int ret;
 
-	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
-	    pm_state.event == PM_EVENT_PRETHAW)
-		return 0;
-
 	if (dev->mode_config.num_crtc) {
 		NV_INFO(drm, "suspending fbcon...\n");
 		nouveau_fbcon_set_suspend(dev, 1);
@@ -436,13 +430,6 @@ nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
 		goto fail_client;
 
 	nouveau_agp_fini(drm);
-
-	pci_save_state(pdev);
-	if (pm_state.event == PM_EVENT_SUSPEND) {
-		pci_disable_device(pdev);
-		pci_set_power_state(pdev, PCI_D3hot);
-	}
-
 	return 0;
 
 fail_client:
@@ -457,24 +444,33 @@ fail_client:
 	return ret;
 }
 
-int
-nouveau_drm_resume(struct pci_dev *pdev)
+int nouveau_pmops_suspend(struct device *dev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_cli *cli;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 	int ret;
 
-	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	NV_INFO(drm, "re-enabling device...\n");
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-	ret = pci_enable_device(pdev);
+	ret = nouveau_do_suspend(drm_dev);
 	if (ret)
 		return ret;
-	pci_set_master(pdev);
+
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+int
+nouveau_do_resume(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_cli *cli;
+
+	NV_INFO(drm, "re-enabling device...\n");
 
 	nouveau_agp_reset(drm);
 
@@ -500,6 +496,42 @@ nouveau_drm_resume(struct pci_dev *pdev)
 	return 0;
 }
 
+int nouveau_pmops_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int ret;
+
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+	pci_set_master(pdev);
+
+	return nouveau_do_resume(drm_dev);
+}
+
+static int nouveau_pmops_freeze(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return nouveau_do_suspend(drm_dev);
+}
+
+static int nouveau_pmops_thaw(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return nouveau_do_resume(drm_dev);
+}
+
+
 static int
 nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 {
@@ -652,14 +684,22 @@ nouveau_drm_pci_table[] = {
 	{}
 };
 
+static const struct dev_pm_ops nouveau_pm_ops = {
+	.suspend = nouveau_pmops_suspend,
+	.resume = nouveau_pmops_resume,
+	.freeze = nouveau_pmops_freeze,
+	.thaw = nouveau_pmops_thaw,
+	.poweroff = nouveau_pmops_freeze,
+	.restore = nouveau_pmops_resume,
+};
+
 static struct pci_driver
 nouveau_drm_pci_driver = {
 	.name = "nouveau",
 	.id_table = nouveau_drm_pci_table,
 	.probe = nouveau_drm_probe,
 	.remove = nouveau_drm_remove,
-	.suspend = nouveau_drm_suspend,
-	.resume = nouveau_drm_resume,
+	.driver.pm = &nouveau_pm_ops,
 };
 
 static int __init
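
The PM conversion above is worth spelling out: the legacy pci_driver
.suspend/.resume pair had to decode pm_message_t by hand, while dev_pm_ops
gives each phase its own callback. The driver work is factored into
nouveau_do_suspend()/nouveau_do_resume(); only the suspend/resume pair adds
the PCI power-state dance, since hibernation's freeze phase has to leave the
device in D0 so the system can still write the image out. Schematically:

	/* suspend-to-RAM: quiesce driver, then power the function down */
	nouveau_do_suspend(drm_dev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	/* hibernate freeze/poweroff: driver quiesce only, device stays live */
	nouveau_do_suspend(drm_dev);
	/* thaw after image creation: undo in-place, no PCI re-enable needed */
	nouveau_do_resume(drm_dev);
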
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index a10169927086..aa89eb938b47 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -129,8 +129,8 @@ nouveau_dev(struct drm_device *dev)
 	return nv_device(nouveau_drm(dev)->device);
 }
 
-int nouveau_drm_suspend(struct pci_dev *, pm_message_t);
-int nouveau_drm_resume(struct pci_dev *);
+int nouveau_pmops_suspend(struct device *);
+int nouveau_pmops_resume(struct device *);
 
 #define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
 #define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 6a17bf2ba9a4..d0d95bd511ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -93,14 +93,9 @@ get_slave_funcs(struct drm_encoder *enc)
 /* nouveau_dp.c */
 bool nouveau_dp_detect(struct drm_encoder *);
 void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate,
-		     struct dp_train_func *);
-u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_output *, u8 **);
+		     struct nouveau_object *);
 
 struct nouveau_connector *
 nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
-int nv50_sor_create(struct drm_connector *, struct dcb_output *);
-void nv50_sor_dp_calc_tu(struct drm_device *, int, int, u32, u32);
-int nv50_dac_create(struct drm_connector *, struct dcb_output *);
-
 
 #endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 5e2f52158f19..8bf695c52f95 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -433,7 +433,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 			return ret;
 		}
 
-		ret = nouveau_bo_validate(nvbo, true, false, false);
+		ret = nouveau_bo_validate(nvbo, true, false);
 		if (unlikely(ret)) {
 			if (ret != -ERESTARTSYS)
 				NV_ERROR(drm, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
deleted file mode 100644
index 2c672cebc889..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_hdmi.c
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drm.h"
-#include "nouveau_connector.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-
-static bool
-hdmi_sor(struct drm_encoder *encoder)
-{
-	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-	if (nv_device(drm->device)->chipset <  0xa3 ||
-	    nv_device(drm->device)->chipset == 0xaa ||
-	    nv_device(drm->device)->chipset == 0xac)
-		return false;
-	return true;
-}
-
-static inline u32
-hdmi_base(struct drm_encoder *encoder)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
-	if (!hdmi_sor(encoder))
-		return 0x616500 + (nv_crtc->index * 0x800);
-	return 0x61c500 + (nv_encoder->or * 0x800);
-}
-
-static void
-hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
-{
-	struct nouveau_device *device = nouveau_dev(encoder->dev);
-	nv_wr32(device, hdmi_base(encoder) + reg, val);
-}
-
-static u32
-hdmi_rd32(struct drm_encoder *encoder, u32 reg)
-{
-	struct nouveau_device *device = nouveau_dev(encoder->dev);
-	return nv_rd32(device, hdmi_base(encoder) + reg);
-}
-
-static u32
-hdmi_mask(struct drm_encoder *encoder, u32 reg, u32 mask, u32 val)
-{
-	u32 tmp = hdmi_rd32(encoder, reg);
-	hdmi_wr32(encoder, reg, (tmp & ~mask) | val);
-	return tmp;
-}
-
-static void
-nouveau_audio_disconnect(struct drm_encoder *encoder)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_device *device = nouveau_dev(encoder->dev);
-	u32 or = nv_encoder->or * 0x800;
-
-	if (hdmi_sor(encoder))
-		nv_mask(device, 0x61c448 + or, 0x00000003, 0x00000000);
-}
-
-static void
-nouveau_audio_mode_set(struct drm_encoder *encoder,
-		       struct drm_display_mode *mode)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_device *device = nouveau_dev(encoder->dev);
-	struct nouveau_connector *nv_connector;
-	u32 or = nv_encoder->or * 0x800;
-	int i;
-
-	nv_connector = nouveau_encoder_connector_get(nv_encoder);
-	if (!drm_detect_monitor_audio(nv_connector->edid)) {
-		nouveau_audio_disconnect(encoder);
-		return;
-	}
-
-	if (hdmi_sor(encoder)) {
-		nv_mask(device, 0x61c448 + or, 0x00000001, 0x00000001);
-
-		drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
-		if (nv_connector->base.eld[0]) {
-			u8 *eld = nv_connector->base.eld;
-			for (i = 0; i < eld[2] * 4; i++)
-				nv_wr32(device, 0x61c440 + or, (i << 8) | eld[i]);
-			for (i = eld[2] * 4; i < 0x60; i++)
-				nv_wr32(device, 0x61c440 + or, (i << 8) | 0x00);
-			nv_mask(device, 0x61c448 + or, 0x00000002, 0x00000002);
-		}
-	}
-}
-
-static void
-nouveau_hdmi_infoframe(struct drm_encoder *encoder, u32 ctrl, u8 *frame)
-{
-	/* calculate checksum for the infoframe */
-	u8 sum = 0, i;
-	for (i = 0; i < frame[2]; i++)
-		sum += frame[i];
-	frame[3] = 256 - sum;
-
-	/* disable infoframe, and write header */
-	hdmi_mask(encoder, ctrl + 0x00, 0x00000001, 0x00000000);
-	hdmi_wr32(encoder, ctrl + 0x08, *(u32 *)frame & 0xffffff);
-
-	/* register scans tell me the audio infoframe has only one set of
-	 * subpack regs, according to tegra (gee nvidia, it'd be nice if we
-	 * could get those docs too!), the hdmi block pads out the rest of
-	 * the packet on its own.
-	 */
-	if (ctrl == 0x020)
-		frame[2] = 6;
-
-	/* write out checksum and data, weird weird 7 byte register pairs */
-	for (i = 0; i < frame[2] + 1; i += 7) {
-		u32 rsubpack = ctrl + 0x0c + ((i / 7) * 8);
-		u32 *subpack = (u32 *)&frame[3 + i];
-		hdmi_wr32(encoder, rsubpack + 0, subpack[0]);
-		hdmi_wr32(encoder, rsubpack + 4, subpack[1] & 0xffffff);
-	}
-
-	/* enable the infoframe */
-	hdmi_mask(encoder, ctrl, 0x00000001, 0x00000001);
-}
-
-static void
-nouveau_hdmi_video_infoframe(struct drm_encoder *encoder,
-			     struct drm_display_mode *mode)
-{
-	const u8 Y = 0, A = 0, B = 0, S = 0, C = 0, M = 0, R = 0;
-	const u8 ITC = 0, EC = 0, Q = 0, SC = 0, VIC = 0, PR = 0;
-	const u8 bar_top = 0, bar_bottom = 0, bar_left = 0, bar_right = 0;
-	u8 frame[20];
-
-	frame[0x00] = 0x82; /* AVI infoframe */
-	frame[0x01] = 0x02; /* version */
-	frame[0x02] = 0x0d; /* length */
-	frame[0x03] = 0x00;
-	frame[0x04] = (Y << 5) | (A << 4) | (B << 2) | S;
-	frame[0x05] = (C << 6) | (M << 4) | R;
-	frame[0x06] = (ITC << 7) | (EC << 4) | (Q << 2) | SC;
-	frame[0x07] = VIC;
-	frame[0x08] = PR;
-	frame[0x09] = bar_top & 0xff;
-	frame[0x0a] = bar_top >> 8;
-	frame[0x0b] = bar_bottom & 0xff;
-	frame[0x0c] = bar_bottom >> 8;
-	frame[0x0d] = bar_left & 0xff;
-	frame[0x0e] = bar_left >> 8;
-	frame[0x0f] = bar_right & 0xff;
-	frame[0x10] = bar_right >> 8;
-	frame[0x11] = 0x00;
-	frame[0x12] = 0x00;
-	frame[0x13] = 0x00;
-
-	nouveau_hdmi_infoframe(encoder, 0x020, frame);
-}
-
-static void
-nouveau_hdmi_audio_infoframe(struct drm_encoder *encoder,
-			     struct drm_display_mode *mode)
-{
-	const u8 CT = 0x00, CC = 0x01, ceaSS = 0x00, SF = 0x00, FMT = 0x00;
-	const u8 CA = 0x00, DM_INH = 0, LSV = 0x00;
-	u8 frame[12];
-
-	frame[0x00] = 0x84;	/* Audio infoframe */
-	frame[0x01] = 0x01;	/* version */
-	frame[0x02] = 0x0a;	/* length */
-	frame[0x03] = 0x00;
-	frame[0x04] = (CT << 4) | CC;
-	frame[0x05] = (SF << 2) | ceaSS;
-	frame[0x06] = FMT;
-	frame[0x07] = CA;
-	frame[0x08] = (DM_INH << 7) | (LSV << 3);
-	frame[0x09] = 0x00;
-	frame[0x0a] = 0x00;
-	frame[0x0b] = 0x00;
-
-	nouveau_hdmi_infoframe(encoder, 0x000, frame);
-}
-
-static void
-nouveau_hdmi_disconnect(struct drm_encoder *encoder)
-{
-	nouveau_audio_disconnect(encoder);
-
-	/* disable audio and avi infoframes */
-	hdmi_mask(encoder, 0x000, 0x00000001, 0x00000000);
-	hdmi_mask(encoder, 0x020, 0x00000001, 0x00000000);
-
-	/* disable hdmi */
-	hdmi_mask(encoder, 0x0a4, 0x40000000, 0x00000000);
-}
-
-void
-nouveau_hdmi_mode_set(struct drm_encoder *encoder,
-		      struct drm_display_mode *mode)
-{
-	struct nouveau_device *device = nouveau_dev(encoder->dev);
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_connector *nv_connector;
-	u32 max_ac_packet, rekey;
-
-	nv_connector = nouveau_encoder_connector_get(nv_encoder);
-	if (!mode || !nv_connector || !nv_connector->edid ||
-	    !drm_detect_hdmi_monitor(nv_connector->edid)) {
-		nouveau_hdmi_disconnect(encoder);
-		return;
-	}
-
-	nouveau_hdmi_video_infoframe(encoder, mode);
-	nouveau_hdmi_audio_infoframe(encoder, mode);
-
-	hdmi_mask(encoder, 0x0d0, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
-	hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
-	hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
-
-	nv_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
-	nv_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
-	nv_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
-
-	/* value matches nvidia binary driver, and tegra constant */
-	rekey = 56;
-
-	max_ac_packet  = mode->htotal - mode->hdisplay;
-	max_ac_packet -= rekey;
-	max_ac_packet -= 18; /* constant from tegra */
-	max_ac_packet /= 32;
-
-	/* enable hdmi */
-	hdmi_mask(encoder, 0x0a4, 0x5f1f003f, 0x40000000 | /* enable */
-					      0x1f000000 | /* unknown */
-					      max_ac_packet << 16 |
-					      rekey);
-
-	nouveau_audio_mode_set(encoder, mode);
-}
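
nouveau_hdmi.c disappears here; as with the DP changes above, the HDMI and
audio programming presumably moves behind the new core display engine rather
than being lost. One detail worth keeping in mind when reading the removed
code: infoframe integrity is a simple mod-256 property, all header and payload
bytes plus the checksum must sum to zero. A generic sketch (hypothetical
helper, not from the tree):

	/* HDMI infoframe: header + checksum + payload sums to 0 (mod 256) */
	static u8 infoframe_checksum(const u8 *frame, int payload_len)
	{
		u8 sum = 0;
		int i;

		for (i = 0; i < 3; i++)		/* type, version, length */
			sum += frame[i];
		for (i = 0; i < payload_len; i++)
			sum += frame[4 + i];	/* payload follows csum byte */
		return 256 - sum;		/* stored into frame[3] */
	}
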
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 1d8cb506a28a..1303680affd3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -60,18 +60,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
 		return IRQ_NONE;
 
 	nv_subdev(pmc)->intr(nv_subdev(pmc));
-
-	if (dev->mode_config.num_crtc) {
-		if (device->card_type >= NV_D0) {
-			if (nv_rd32(device, 0x000100) & 0x04000000)
-				nvd0_display_intr(dev);
-		} else
-		if (device->card_type >= NV_50) {
-			if (nv_rd32(device, 0x000100) & 0x04000000)
-				nv50_display_intr(dev);
-		}
-	}
-
 	return IRQ_HANDLED;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 366462cf8a2c..3543fec2355e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -155,10 +155,6 @@ nouveau_prime_new(struct drm_device *dev,
 		return ret;
 	nvbo = *pnvbo;
 
-	/* we restrict allowed domains on nv50+ to only the types
-	 * that were requested at creation time.  not possibly on
-	 * earlier chips without busting the ABI.
-	 */
 	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
 	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
 	if (!nvbo->gem) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 6f0ac64873df..25d3495725eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -31,12 +31,11 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
 			     enum vga_switcheroo_state state)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
-	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
 
 	if (state == VGA_SWITCHEROO_ON) {
 		printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-		nouveau_drm_resume(pdev);
+		nouveau_pmops_resume(&pdev->dev);
 		drm_kms_helper_poll_enable(dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	} else {
@@ -44,7 +43,7 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		drm_kms_helper_poll_disable(dev);
 		nouveau_switcheroo_optimus_dsm();
-		nouveau_drm_suspend(pdev, pmm);
+		nouveau_pmops_suspend(&pdev->dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 82a0d9c6cda3..6578cd28c556 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -730,6 +730,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
 	drm_crtc_cleanup(crtc);
 
 	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+	nouveau_bo_unpin(nv_crtc->cursor.nvbo);
 	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
 	kfree(nv_crtc);
 }
@@ -1056,8 +1057,11 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
 			     0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
-		if (!ret)
+		if (!ret) {
 			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+			if (ret)
+				nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+		}
 		if (ret)
 			nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
 	}
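
The cursor BO fix here is one instance of a pattern repeated across this
series (nv10_fence.c below gets the identical treatment): when
nouveau_bo_map() fails after a successful pin, the pin has to be released
before the last reference is dropped, or the buffer leaks a pin count. The
general shape:

	ret = nouveau_bo_new(dev, size, 0x100, TTM_PL_FLAG_VRAM,
			     0, 0x0000, NULL, &nvbo);
	if (!ret) {
		ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
		if (!ret) {
			ret = nouveau_bo_map(nvbo);
			if (ret)
				nouveau_bo_unpin(nvbo);	/* undo the pin */
		}
		if (ret)
			nouveau_bo_ref(NULL, &nvbo);	/* drop our ref */
	}
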
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 846050f04c23..2cd6fb8c548e 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -60,8 +60,6 @@ nv04_display_create(struct drm_device *dev)
 	struct nv04_display *disp;
 	int i, ret;
 
-	NV_DEBUG(drm, "\n");
-
 	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
 	if (!disp)
 		return -ENOMEM;
@@ -132,13 +130,10 @@ nv04_display_create(struct drm_device *dev)
 void
 nv04_display_destroy(struct drm_device *dev)
 {
-	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nv04_display *disp = nv04_display(dev);
 	struct drm_encoder *encoder;
 	struct drm_crtc *crtc;
 
-	NV_DEBUG(drm, "\n");
-
 	/* Turn every CRTC off. */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct drm_mode_set modeset = {
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index ce752bf5cc4e..7ae7f97a6d4d 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -155,6 +155,8 @@ nv10_fence_destroy(struct nouveau_drm *drm)
 {
 	struct nv10_fence_priv *priv = drm->fence;
 	nouveau_bo_unmap(priv->bo);
+	if (priv->bo)
+		nouveau_bo_unpin(priv->bo);
 	nouveau_bo_ref(NULL, &priv->bo);
 	drm->fence = NULL;
 	kfree(priv);
@@ -183,8 +185,11 @@ nv10_fence_create(struct nouveau_drm *drm)
 				     0, 0x0000, NULL, &priv->bo);
 		if (!ret) {
 			ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
-			if (!ret)
+			if (!ret) {
 				ret = nouveau_bo_map(priv->bo);
+				if (ret)
+					nouveau_bo_unpin(priv->bo);
+			}
 			if (ret)
 				nouveau_bo_ref(NULL, &priv->bo);
 		}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 897b63621e2d..2ca276ada507 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -195,7 +195,7 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
 		break;
 	}
 
-	drm_connector_property_set_value(connector,
+	drm_object_property_set_value(&connector->base,
 					 conf->tv_subconnector_property,
 					 tv_enc->subconnector);
 
@@ -672,25 +672,25 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
 
 	drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
 
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 					conf->tv_select_subconnector_property,
 					tv_enc->select_subconnector);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 					conf->tv_subconnector_property,
 					tv_enc->subconnector);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 					conf->tv_mode_property,
 					tv_enc->tv_norm);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 					conf->tv_flicker_reduction_property,
 					tv_enc->flicker);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 					conf->tv_saturation_property,
 					tv_enc->saturation);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 					conf->tv_hue_property,
 					tv_enc->hue);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 					conf->tv_overscan_property,
 					tv_enc->overscan);
 
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
deleted file mode 100644
index 222de77d6269..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ /dev/null
@@ -1,764 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_gem.h"
-#include "nouveau_hw.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-#include "nouveau_connector.h"
-#include "nv50_display.h"
-
-#include <subdev/clock.h>
-
-static void
-nv50_crtc_lut_load(struct drm_crtc *crtc)
-{
-	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
-	int i;
-
-	NV_DEBUG(drm, "\n");
-
-	for (i = 0; i < 256; i++) {
-		writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
-		writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2);
-		writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4);
-	}
-
-	if (nv_crtc->lut.depth == 30) {
-		writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0);
-		writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2);
-		writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4);
-	}
-}
-
-int
-nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
-{
-	struct drm_device *dev = nv_crtc->base.dev;
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_channel *evo = nv50_display(dev)->master;
-	int index = nv_crtc->index, ret;
-
-	NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-	NV_DEBUG(drm, "%s\n", blanked ? "blanked" : "unblanked");
-
-	if (blanked) {
-		nv_crtc->cursor.hide(nv_crtc, false);
-
-		ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 7 : 5);
-		if (ret) {
-			NV_ERROR(drm, "no space while blanking crtc\n");
-			return ret;
-		}
-		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
-		OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
-		OUT_RING(evo, 0);
-		if (nv_device(drm->device)->chipset != 0x50) {
-			BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
-			OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
-		}
-
-		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
-		OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
-	} else {
-		if (nv_crtc->cursor.visible)
-			nv_crtc->cursor.show(nv_crtc, false);
-		else
-			nv_crtc->cursor.hide(nv_crtc, false);
-
-		ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 10 : 8);
-		if (ret) {
-			NV_ERROR(drm, "no space while unblanking crtc\n");
-			return ret;
-		}
-		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
-		OUT_RING(evo, nv_crtc->lut.depth == 8 ?
-				NV50_EVO_CRTC_CLUT_MODE_OFF :
-				NV50_EVO_CRTC_CLUT_MODE_ON);
-		OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
-		if (nv_device(drm->device)->chipset != 0x50) {
-			BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
-			OUT_RING(evo, NvEvoVRAM);
-		}
-
-		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
-		OUT_RING(evo, nv_crtc->fb.offset >> 8);
-		OUT_RING(evo, 0);
-		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
-		if (nv_device(drm->device)->chipset != 0x50)
-			if (nv_crtc->fb.tile_flags == 0x7a00 ||
-			    nv_crtc->fb.tile_flags == 0xfe00)
-				OUT_RING(evo, NvEvoFB32);
-			else
-			if (nv_crtc->fb.tile_flags == 0x7000)
-				OUT_RING(evo, NvEvoFB16);
-			else
-				OUT_RING(evo, NvEvoVRAM_LP);
-		else
-			OUT_RING(evo, NvEvoVRAM_LP);
-	}
-
-	nv_crtc->fb.blanked = blanked;
-	return 0;
-}
-
-static int
-nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
-{
-	struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master;
-	struct nouveau_connector *nv_connector;
-	struct drm_connector *connector;
-	int head = nv_crtc->index, ret;
-	u32 mode = 0x00;
-
-	nv_connector = nouveau_crtc_connector_get(nv_crtc);
-	connector = &nv_connector->base;
-	if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
-		if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
-			mode = DITHERING_MODE_DYNAMIC2X2;
-	} else {
-		mode = nv_connector->dithering_mode;
-	}
-
-	if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
-		if (connector->display_info.bpc >= 8)
-			mode |= DITHERING_DEPTH_8BPC;
-	} else {
-		mode |= nv_connector->dithering_depth;
-	}
-
-	ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
-	if (ret == 0) {
-		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
-		OUT_RING  (evo, mode);
-		if (update) {
-			BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-			OUT_RING  (evo, 0);
-			FIRE_RING (evo);
-		}
-	}
-
-	return ret;
-}
-
-static int
-nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
-{
-	struct drm_device *dev = nv_crtc->base.dev;
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_channel *evo = nv50_display(dev)->master;
-	int ret;
-	int adj;
-	u32 hue, vib;
-
-	NV_DEBUG(drm, "vibrance = %i, hue = %i\n",
-		     nv_crtc->color_vibrance, nv_crtc->vibrant_hue);
-
-	ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
-	if (ret) {
-		NV_ERROR(drm, "no space while setting color vibrance\n");
-		return ret;
-	}
-
-	adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
-	vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
-
-	hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
-
-	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
-	OUT_RING  (evo, (hue << 20) | (vib << 8));
-
-	if (update) {
-		BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-		OUT_RING  (evo, 0);
-		FIRE_RING (evo);
-	}
-
-	return 0;
-}
-
-struct nouveau_connector *
-nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
-{
-	struct drm_device *dev = nv_crtc->base.dev;
-	struct drm_connector *connector;
-	struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
-
-	/* The safest approach is to find an encoder with the right crtc, that
-	 * is also linked to a connector. */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (connector->encoder)
-			if (connector->encoder->crtc == crtc)
-				return nouveau_connector(connector);
-	}
-
-	return NULL;
-}
-
-static int
-nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
-{
-	struct nouveau_connector *nv_connector;
-	struct drm_crtc *crtc = &nv_crtc->base;
-	struct drm_device *dev = crtc->dev;
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_channel *evo = nv50_display(dev)->master;
-	struct drm_display_mode *umode = &crtc->mode;
-	struct drm_display_mode *omode;
-	int scaling_mode, ret;
-	u32 ctrl = 0, oX, oY;
-
-	NV_DEBUG(drm, "\n");
-
-	nv_connector = nouveau_crtc_connector_get(nv_crtc);
-	if (!nv_connector || !nv_connector->native_mode) {
-		NV_ERROR(drm, "no native mode, forcing panel scaling\n");
-		scaling_mode = DRM_MODE_SCALE_NONE;
-	} else {
-		scaling_mode = nv_connector->scaling_mode;
-	}
-
-	/* start off at the resolution we programmed the crtc for, this
-	 * effectively handles NONE/FULL scaling
-	 */
-	if (scaling_mode != DRM_MODE_SCALE_NONE)
-		omode = nv_connector->native_mode;
-	else
-		omode = umode;
-
-	oX = omode->hdisplay;
-	oY = omode->vdisplay;
-	if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
-		oY *= 2;
-
-	/* add overscan compensation if necessary, will keep the aspect
-	 * ratio the same as the backend mode unless overridden by the
-	 * user setting both hborder and vborder properties.
-	 */
-	if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
-			     (nv_connector->underscan == UNDERSCAN_AUTO &&
-			      nv_connector->edid &&
-			      drm_detect_hdmi_monitor(nv_connector->edid)))) {
-		u32 bX = nv_connector->underscan_hborder;
-		u32 bY = nv_connector->underscan_vborder;
-		u32 aspect = (oY << 19) / oX;
-
-		if (bX) {
-			oX -= (bX * 2);
-			if (bY) oY -= (bY * 2);
-			else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
-		} else {
-			oX -= (oX >> 4) + 32;
-			if (bY) oY -= (bY * 2);
-			else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
-		}
-	}
-
-	/* handle CENTER/ASPECT scaling, taking into account the areas
-	 * removed already for overscan compensation
-	 */
-	switch (scaling_mode) {
-	case DRM_MODE_SCALE_CENTER:
-		oX = min((u32)umode->hdisplay, oX);
-		oY = min((u32)umode->vdisplay, oY);
-		/* fall-through */
-	case DRM_MODE_SCALE_ASPECT:
-		if (oY < oX) {
-			u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
-			oX = ((oY * aspect) + (aspect / 2)) >> 19;
-		} else {
-			u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
-			oY = ((oX * aspect) + (aspect / 2)) >> 19;
-		}
-		break;
-	default:
-		break;
-	}
-
-	if (umode->hdisplay != oX || umode->vdisplay != oY ||
-	    umode->flags & DRM_MODE_FLAG_INTERLACE ||
-	    umode->flags & DRM_MODE_FLAG_DBLSCAN)
-		ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE;
-
-	ret = RING_SPACE(evo, 5);
-	if (ret)
-		return ret;
-
-	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
-	OUT_RING  (evo, ctrl);
-	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
-	OUT_RING  (evo, oY << 16 | oX);
-	OUT_RING  (evo, oY << 16 | oX);
-
-	if (update) {
-		nv50_display_flip_stop(crtc);
-		nv50_display_sync(dev);
-		nv50_display_flip_next(crtc, crtc->fb, NULL);
-	}
-
-	return 0;
-}
-
-int
-nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_clock *clk = nouveau_clock(device);
-
-	return clk->pll_set(clk, PLL_VPLL0 + head, pclk);
-}
-
-static void
-nv50_crtc_destroy(struct drm_crtc *crtc)
-{
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-
-	NV_DEBUG(drm, "\n");
-
-	nouveau_bo_unmap(nv_crtc->lut.nvbo);
-	nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
-	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
-	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
-	drm_crtc_cleanup(&nv_crtc->base);
-	kfree(nv_crtc);
-}
-
-int
-nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
-		     uint32_t buffer_handle, uint32_t width, uint32_t height)
-{
-	struct drm_device *dev = crtc->dev;
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct nouveau_bo *cursor = NULL;
-	struct drm_gem_object *gem;
-	int ret = 0, i;
-
-	if (!buffer_handle) {
-		nv_crtc->cursor.hide(nv_crtc, true);
-		return 0;
-	}
-
-	if (width != 64 || height != 64)
-		return -EINVAL;
-
-	gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
-	if (!gem)
-		return -ENOENT;
-	cursor = nouveau_gem_object(gem);
-
-	ret = nouveau_bo_map(cursor);
-	if (ret)
-		goto out;
-
-	/* The simple will do for now. */
-	for (i = 0; i < 64 * 64; i++)
-		nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i));
-
-	nouveau_bo_unmap(cursor);
-
-	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
-	nv_crtc->cursor.show(nv_crtc, true);
-
-out:
-	drm_gem_object_unreference_unlocked(gem);
-	return ret;
-}
-
-int
-nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
-	nv_crtc->cursor.set_pos(nv_crtc, x, y);
-	return 0;
-}
-
-static void
-nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
-		    uint32_t start, uint32_t size)
-{
-	int end = (start + size > 256) ? 256 : start + size, i;
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
-	for (i = start; i < end; i++) {
-		nv_crtc->lut.r[i] = r[i];
-		nv_crtc->lut.g[i] = g[i];
-		nv_crtc->lut.b[i] = b[i];
-	}
-
-	/* We need to know the depth before we upload, but it's possible to
-	 * get called before a framebuffer is bound.  If this is the case,
-	 * mark the lut values as dirty by setting depth==0, and it'll be
-	 * uploaded on the first mode_set_base()
-	 */
-	if (!nv_crtc->base.fb) {
-		nv_crtc->lut.depth = 0;
-		return;
-	}
-
-	nv50_crtc_lut_load(crtc);
-}
-
-static void
-nv50_crtc_save(struct drm_crtc *crtc)
-{
-	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-	NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_crtc_restore(struct drm_crtc *crtc)
-{
-	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-	NV_ERROR(drm, "!!\n");
-}
-
-static const struct drm_crtc_funcs nv50_crtc_funcs = {
-	.save = nv50_crtc_save,
-	.restore = nv50_crtc_restore,
-	.cursor_set = nv50_crtc_cursor_set,
-	.cursor_move = nv50_crtc_cursor_move,
-	.gamma_set = nv50_crtc_gamma_set,
-	.set_config = drm_crtc_helper_set_config,
-	.page_flip = nouveau_crtc_page_flip,
-	.destroy = nv50_crtc_destroy,
-};
-
-static void
-nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-}
-
-static void
-nv50_crtc_prepare(struct drm_crtc *crtc)
-{
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct drm_device *dev = crtc->dev;
-	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
-	nv50_display_flip_stop(crtc);
-	drm_vblank_pre_modeset(dev, nv_crtc->index);
-	nv50_crtc_blank(nv_crtc, true);
-}
-
-static void
-nv50_crtc_commit(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
-	NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
-	nv50_crtc_blank(nv_crtc, false);
-	drm_vblank_post_modeset(dev, nv_crtc->index);
-	nv50_display_sync(dev);
-	nv50_display_flip_next(crtc, crtc->fb, NULL);
-}
-
-static bool
-nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
-		     struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
-static int
-nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
-			   struct drm_framebuffer *passed_fb,
-			   int x, int y, bool atomic)
-{
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct drm_device *dev = nv_crtc->base.dev;
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_channel *evo = nv50_display(dev)->master;
-	struct drm_framebuffer *drm_fb;
-	struct nouveau_framebuffer *fb;
-	int ret;
-
-	NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
-	/* no fb bound */
-	if (!atomic && !crtc->fb) {
-		NV_DEBUG(drm, "No FB bound\n");
-		return 0;
-	}
-
-	/* If atomic, we want to switch to the fb we were passed, so
-	 * now we update pointers to do that.  (We don't pin; just
-	 * assume we're already pinned and update the base address.)
-	 */
-	if (atomic) {
-		drm_fb = passed_fb;
-		fb = nouveau_framebuffer(passed_fb);
-	} else {
-		drm_fb = crtc->fb;
-		fb = nouveau_framebuffer(crtc->fb);
-		/* If not atomic, we can go ahead and pin, and unpin the
-		 * old fb we were passed.
-		 */
-		ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
-		if (ret)
-			return ret;
-
-		if (passed_fb) {
-			struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
-			nouveau_bo_unpin(ofb->nvbo);
-		}
-	}
-
-	nv_crtc->fb.offset = fb->nvbo->bo.offset;
-	nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
-	nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
-	if (!nv_crtc->fb.blanked && nv_device(drm->device)->chipset != 0x50) {
-		ret = RING_SPACE(evo, 2);
-		if (ret)
-			return ret;
-
-		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
-		OUT_RING  (evo, fb->r_dma);
-	}
-
-	ret = RING_SPACE(evo, 12);
-	if (ret)
-		return ret;
-
-	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
-	OUT_RING  (evo, nv_crtc->fb.offset >> 8);
-	OUT_RING  (evo, 0);
-	OUT_RING  (evo, (drm_fb->height << 16) | drm_fb->width);
-	OUT_RING  (evo, fb->r_pitch);
-	OUT_RING  (evo, fb->r_format);
-
-	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
-	OUT_RING  (evo, fb->base.depth == 8 ?
-		   NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
-
-	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
-	OUT_RING  (evo, (y << 16) | x);
-
-	if (nv_crtc->lut.depth != fb->base.depth) {
-		nv_crtc->lut.depth = fb->base.depth;
-		nv50_crtc_lut_load(crtc);
-	}
-
-	return 0;
-}
-
-static int
-nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
-		   struct drm_display_mode *mode, int x, int y,
-		   struct drm_framebuffer *old_fb)
-{
-	struct drm_device *dev = crtc->dev;
-	struct nouveau_channel *evo = nv50_display(dev)->master;
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	u32 head = nv_crtc->index * 0x400;
-	u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
-	u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
-	u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
-	u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
-	u32 vblan2e = 0, vblan2s = 1;
-	int ret;
-
-	/* hw timing description looks like this:
-	 *
-	 * <sync> <back porch> <---------display---------> <front porch>
-	 * ______
-	 *       |____________|---------------------------|____________|
-	 *
-	 *       ^ synce      ^ blanke                    ^ blanks     ^ active
-	 *
-	 * interlaced modes also have 2 additional values pointing at the end
-	 * and start of the next field's blanking period.
-	 */
-
-	hactive = mode->htotal;
-	hsynce  = mode->hsync_end - mode->hsync_start - 1;
-	hbackp  = mode->htotal - mode->hsync_end;
-	hblanke = hsynce + hbackp;
-	hfrontp = mode->hsync_start - mode->hdisplay;
-	hblanks = mode->htotal - hfrontp - 1;
-
-	vactive = mode->vtotal * vscan / ilace;
-	vsynce  = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
-	vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
-	vblanke = vsynce + vbackp;
-	vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
-	vblanks = vactive - vfrontp - 1;
-	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
-		vblan2e = vactive + vsynce + vbackp;
-		vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
-		vactive = (vactive * 2) + 1;
-	}
-
-	ret = RING_SPACE(evo, 18);
-	if (ret == 0) {
-		BEGIN_NV04(evo, 0, 0x0804 + head, 2);
-		OUT_RING  (evo, 0x00800000 | mode->clock);
-		OUT_RING  (evo, (ilace == 2) ? 2 : 0);
-		BEGIN_NV04(evo, 0, 0x0810 + head, 6);
-		OUT_RING  (evo, 0x00000000); /* border colour */
-		OUT_RING  (evo, (vactive << 16) | hactive);
-		OUT_RING  (evo, ( vsynce << 16) | hsynce);
-		OUT_RING  (evo, (vblanke << 16) | hblanke);
-		OUT_RING  (evo, (vblanks << 16) | hblanks);
-		OUT_RING  (evo, (vblan2e << 16) | vblan2s);
-		BEGIN_NV04(evo, 0, 0x082c + head, 1);
-		OUT_RING  (evo, 0x00000000);
-		BEGIN_NV04(evo, 0, 0x0900 + head, 1);
-		OUT_RING  (evo, 0x00000311); /* makes sync channel work */
-		BEGIN_NV04(evo, 0, 0x08c8 + head, 1);
-		OUT_RING  (evo, (umode->vdisplay << 16) | umode->hdisplay);
-		BEGIN_NV04(evo, 0, 0x08d4 + head, 1);
-		OUT_RING  (evo, 0x00000000); /* screen position */
-	}
-
-	nv_crtc->set_dither(nv_crtc, false);
-	nv_crtc->set_scale(nv_crtc, false);
-	nv_crtc->set_color_vibrance(nv_crtc, false);
-
-	return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
-}
-
-static int
-nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-			struct drm_framebuffer *old_fb)
-{
-	int ret;
-
-	nv50_display_flip_stop(crtc);
-	ret = nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
-	if (ret)
-		return ret;
-
-	ret = nv50_display_sync(crtc->dev);
-	if (ret)
-		return ret;
-
-	return nv50_display_flip_next(crtc, crtc->fb, NULL);
-}
-
-static int
-nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
-			       struct drm_framebuffer *fb,
-			       int x, int y, enum mode_set_atomic state)
-{
-	int ret;
-
-	nv50_display_flip_stop(crtc);
-	ret = nv50_crtc_do_mode_set_base(crtc, fb, x, y, true);
-	if (ret)
-		return ret;
-
-	return nv50_display_sync(crtc->dev);
-}
-
-static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
-	.dpms = nv50_crtc_dpms,
-	.prepare = nv50_crtc_prepare,
-	.commit = nv50_crtc_commit,
-	.mode_fixup = nv50_crtc_mode_fixup,
-	.mode_set = nv50_crtc_mode_set,
-	.mode_set_base = nv50_crtc_mode_set_base,
-	.mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
-	.load_lut = nv50_crtc_lut_load,
-};
-
-int
-nv50_crtc_create(struct drm_device *dev, int index)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_crtc *nv_crtc = NULL;
-	int ret, i;
-
-	NV_DEBUG(drm, "\n");
-
-	nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
-	if (!nv_crtc)
-		return -ENOMEM;
-
-	nv_crtc->index = index;
-	nv_crtc->set_dither = nv50_crtc_set_dither;
-	nv_crtc->set_scale = nv50_crtc_set_scale;
-	nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
-	nv_crtc->color_vibrance = 50;
-	nv_crtc->vibrant_hue = 0;
-	nv_crtc->lut.depth = 0;
-	for (i = 0; i < 256; i++) {
-		nv_crtc->lut.r[i] = i << 8;
-		nv_crtc->lut.g[i] = i << 8;
-		nv_crtc->lut.b[i] = i << 8;
-	}
-
-	drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
-	drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
-	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
-
-	ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, NULL, &nv_crtc->lut.nvbo);
-	if (!ret) {
-		ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
-		if (!ret)
-			ret = nouveau_bo_map(nv_crtc->lut.nvbo);
-		if (ret)
-			nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
-	}
-
-	if (ret)
-		goto out;
-
-
-	ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
-	if (!ret) {
-		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
-		if (!ret)
-			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
-		if (ret)
-			nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
-	}
-
-	if (ret)
-		goto out;
-
-	nv50_cursor_init(nv_crtc);
-out:
-	if (ret)
-		nv50_crtc_destroy(&nv_crtc->base);
-	return ret;
-}
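
The timing block deleted above is the least obvious part of the old CRTC code: the EVO core channel counts from the start of sync, so blanking boundaries are programmed relative to hsync/vsync rather than to active video. A standalone sketch of the horizontal half of that math, using assumed 1024x768 timings purely as a worked example:

#include <stdio.h>

/* Mirrors the horizontal math of the deleted nv50_crtc_mode_set();
 * the mode numbers below are assumed example timings.
 */
int main(void)
{
	unsigned hdisplay = 1024, hsync_start = 1048;
	unsigned hsync_end = 1184, htotal = 1344;

	unsigned hsynce  = hsync_end - hsync_start - 1; /* sync width - 1 */
	unsigned hbackp  = htotal - hsync_end;          /* back porch */
	unsigned hblanke = hsynce + hbackp;             /* blanking ends */
	unsigned hfrontp = hsync_start - hdisplay;      /* front porch */
	unsigned hblanks = htotal - hfrontp - 1;        /* blanking starts */

	printf("synce=%u blanke=%u blanks=%u active=%u\n",
	       hsynce, hblanke, hblanks, htotal);  /* 135 295 1319 1344 */
	return 0;
}
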
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
deleted file mode 100644
index 223da113ceee..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-static void
-nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
-{
-	struct drm_device *dev = nv_crtc->base.dev;
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_channel *evo = nv50_display(dev)->master;
-	int ret;
-
-	NV_DEBUG(drm, "\n");
-
-	if (update && nv_crtc->cursor.visible)
-		return;
-
-	ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
-	if (ret) {
-		NV_ERROR(drm, "no space while unhiding cursor\n");
-		return;
-	}
-
-	if (nv_device(drm->device)->chipset != 0x50) {
-		BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
-		OUT_RING(evo, NvEvoVRAM);
-	}
-	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
-	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
-	OUT_RING(evo, nv_crtc->cursor.offset >> 8);
-
-	if (update) {
-		BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-		OUT_RING(evo, 0);
-		FIRE_RING(evo);
-		nv_crtc->cursor.visible = true;
-	}
-}
-
-static void
-nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
-{
-	struct drm_device *dev = nv_crtc->base.dev;
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_channel *evo = nv50_display(dev)->master;
-	int ret;
-
-	NV_DEBUG(drm, "\n");
-
-	if (update && !nv_crtc->cursor.visible)
-		return;
-
-	ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
-	if (ret) {
-		NV_ERROR(drm, "no space while hiding cursor\n");
-		return;
-	}
-	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
-	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
-	OUT_RING(evo, 0);
-	if (nv_device(drm->device)->chipset != 0x50) {
-		BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
-		OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
-	}
-
-	if (update) {
-		BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-		OUT_RING(evo, 0);
-		FIRE_RING(evo);
-		nv_crtc->cursor.visible = false;
-	}
-}
-
-static void
-nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
-{
-	struct nouveau_device *device = nouveau_dev(nv_crtc->base.dev);
-
-	nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
-	nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
-		((y & 0xFFFF) << 16) | (x & 0xFFFF));
-	/* Needed to make the cursor move. */
-	nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
-}
-
-static void
-nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
-{
-	if (offset == nv_crtc->cursor.offset)
-		return;
-
-	nv_crtc->cursor.offset = offset;
-	if (nv_crtc->cursor.visible) {
-		nv_crtc->cursor.visible = false;
-		nv_crtc->cursor.show(nv_crtc, true);
-	}
-}
-
-int
-nv50_cursor_init(struct nouveau_crtc *nv_crtc)
-{
-	nv_crtc->cursor.set_offset = nv50_cursor_set_offset;
-	nv_crtc->cursor.set_pos = nv50_cursor_set_pos;
-	nv_crtc->cursor.hide = nv50_cursor_hide;
-	nv_crtc->cursor.show = nv50_cursor_show;
-	return 0;
-}
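
The cursor code removed above also demonstrates the EVO double-buffering convention that the replacement code keeps: state methods are only staged, and take effect when an UPDATE method is fired. A condensed sketch of that commit pattern, reusing the ring helpers and method names from the deleted file (RING_SPACE error handling omitted):

static void
cursor_set_visible(struct nouveau_channel *evo, struct nouveau_crtc *nv_crtc,
		   bool show, bool update)
{
	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
	OUT_RING  (evo, show ? NV50_EVO_CRTC_CURSOR_CTRL_SHOW
			     : NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
	OUT_RING  (evo, show ? nv_crtc->cursor.offset >> 8 : 0);

	if (update) {
		BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING  (evo, 0);
		FIRE_RING (evo);	/* staged state latches here */
		nv_crtc->cursor.visible = show;
	}
}
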
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
deleted file mode 100644
index 6a30a1748573..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ /dev/null
@@ -1,321 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_encoder.h"
-#include "nouveau_connector.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-#include <subdev/timer.h>
-
-static void
-nv50_dac_disconnect(struct drm_encoder *encoder)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct drm_device *dev = encoder->dev;
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_channel *evo = nv50_display(dev)->master;
-	int ret;
-
-	if (!nv_encoder->crtc)
-		return;
-	nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
-
-	NV_DEBUG(drm, "Disconnecting DAC %d\n", nv_encoder->or);
-
-	ret = RING_SPACE(evo, 4);
-	if (ret) {
-		NV_ERROR(drm, "no space while disconnecting DAC\n");
-		return;
-	}
-	BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
-	OUT_RING  (evo, 0);
-	BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-	OUT_RING  (evo, 0);
-
-	nv_encoder->crtc = NULL;
-}
-
-static enum drm_connector_status
-nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_device *device = nouveau_dev(encoder->dev);
-	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-	enum drm_connector_status status = connector_status_disconnected;
-	uint32_t dpms_state, load_pattern, load_state;
-	int or = nv_encoder->or;
-
-	nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
-	dpms_state = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or));
-
-	nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
-		0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-	if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
-		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
-		NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
-		NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
-			  nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
-		return status;
-	}
-
-	/* Use bios provided value if possible. */
-	if (drm->vbios.dactestval) {
-		load_pattern = drm->vbios.dactestval;
-		NV_DEBUG(drm, "Using bios provided load_pattern of %d\n",
-			  load_pattern);
-	} else {
-		load_pattern = 340;
-		NV_DEBUG(drm, "Using default load_pattern of %d\n",
-			 load_pattern);
-	}
-
-	nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
-		NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
-	mdelay(45); /* give it some time to process */
-	load_state = nv_rd32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
-
-	nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
-	nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
-		NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-
-	if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
-			  NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
-		status = connector_status_connected;
-
-	if (status == connector_status_connected)
-		NV_DEBUG(drm, "Load was detected on output with or %d\n", or);
-	else
-		NV_DEBUG(drm, "Load was not detected on output with or %d\n", or);
-
-	return status;
-}
-
-static void
-nv50_dac_dpms(struct drm_encoder *encoder, int mode)
-{
-	struct nouveau_device *device = nouveau_dev(encoder->dev);
-	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	uint32_t val;
-	int or = nv_encoder->or;
-
-	NV_DEBUG(drm, "or %d mode %d\n", or, mode);
-
-	/* wait for it to be done */
-	if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
-		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
-		NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
-		NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
-			 nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
-		return;
-	}
-
-	val = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;
-
-	if (mode != DRM_MODE_DPMS_ON)
-		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;
-
-	switch (mode) {
-	case DRM_MODE_DPMS_STANDBY:
-		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
-		break;
-	case DRM_MODE_DPMS_SUSPEND:
-		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
-		break;
-	case DRM_MODE_DPMS_OFF:
-		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF;
-		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
-		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
-		break;
-	default:
-		break;
-	}
-
-	nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
-		NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-}
-
-static void
-nv50_dac_save(struct drm_encoder *encoder)
-{
-	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-	NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_dac_restore(struct drm_encoder *encoder)
-{
-	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-	NV_ERROR(drm, "!!\n");
-}
-
-static bool
-nv50_dac_mode_fixup(struct drm_encoder *encoder,
-		    const struct drm_display_mode *mode,
-		    struct drm_display_mode *adjusted_mode)
-{
-	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_connector *connector;
-
-	NV_DEBUG(drm, "or %d\n", nv_encoder->or);
-
-	connector = nouveau_encoder_connector_get(nv_encoder);
-	if (!connector) {
-		NV_ERROR(drm, "Encoder has no connector\n");
-		return false;
-	}
-
-	if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
-	     connector->native_mode)
-		drm_mode_copy(adjusted_mode, connector->native_mode);
-
-	return true;
-}
-
-static void
-nv50_dac_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
-		  struct drm_display_mode *adjusted_mode)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-	struct drm_device *dev = encoder->dev;
-	struct nouveau_channel *evo = nv50_display(dev)->master;
-	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
-	uint32_t mode_ctl = 0, mode_ctl2 = 0;
-	int ret;
-
-	NV_DEBUG(drm, "or %d type %d crtc %d\n",
-		     nv_encoder->or, nv_encoder->dcb->type, crtc->index);
-
-	nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
-
-	if (crtc->index == 1)
-		mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1;
-	else
-		mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;
-
-	/* Lacking a working tv-out, this is not a 100% sure. */
-	if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG)
-		mode_ctl |= 0x40;
-	else
-	if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
-		mode_ctl |= 0x100;
-
-	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
-		mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC;
-
-	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
-		mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC;
-
-	ret = RING_SPACE(evo, 3);
-	if (ret) {
-		NV_ERROR(drm, "no space while connecting DAC\n");
-		return;
-	}
-	BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
-	OUT_RING(evo, mode_ctl);
-	OUT_RING(evo, mode_ctl2);
-
-	nv_encoder->crtc = encoder->crtc;
-}
-
-static struct drm_crtc *
-nv50_dac_crtc_get(struct drm_encoder *encoder)
-{
-	return nouveau_encoder(encoder)->crtc;
-}
-
-static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
-	.dpms = nv50_dac_dpms,
-	.save = nv50_dac_save,
-	.restore = nv50_dac_restore,
-	.mode_fixup = nv50_dac_mode_fixup,
-	.prepare = nv50_dac_disconnect,
-	.commit = nv50_dac_commit,
-	.mode_set = nv50_dac_mode_set,
-	.get_crtc = nv50_dac_crtc_get,
-	.detect = nv50_dac_detect,
-	.disable = nv50_dac_disconnect
-};
-
-static void
-nv50_dac_destroy(struct drm_encoder *encoder)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-
-	if (!encoder)
-		return;
-
-	NV_DEBUG(drm, "\n");
-
-	drm_encoder_cleanup(encoder);
-	kfree(nv_encoder);
-}
-
-static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
-	.destroy = nv50_dac_destroy,
-};
-
-int
-nv50_dac_create(struct drm_connector *connector, struct dcb_output *entry)
-{
-	struct nouveau_encoder *nv_encoder;
-	struct drm_encoder *encoder;
-
-	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
-	if (!nv_encoder)
-		return -ENOMEM;
-	encoder = to_drm_encoder(nv_encoder);
-
-	nv_encoder->dcb = entry;
-	nv_encoder->or = ffs(entry->or) - 1;
-
-	drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs,
-			 DRM_MODE_ENCODER_DAC);
-	drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);
-
-	encoder->possible_crtcs = entry->heads;
-	encoder->possible_clones = 0;
-
-	drm_mode_connector_attach_encoder(connector, encoder);
-	return 0;
-}
-
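
nv50_dac_detect() above is classic VGA load sensing: drive a known test level out of the DAC, give it time to settle, then check whether a terminated monitor shows up in the PRESENT bits. The core of the sequence, condensed from the deleted function (dpms state handling and the vbios test-pattern fallback omitted):

static bool
dac_load_sense(struct nouveau_device *device, int or, u32 load_pattern)
{
	u32 load_state;

	nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
		NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
	mdelay(45); /* give it some time to process */
	load_state = nv_rd32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
	nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);

	return (load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
		       NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT;
}
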
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f97b42cbb6bb..35874085a61e 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1,969 +1,2058 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
+/*
+ * Copyright 2011 Red Hat Inc.
  *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
  *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
  *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
  *
+ * Authors: Ben Skeggs
  */
 
+#include <linux/dma-mapping.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
 #include "nouveau_drm.h"
 #include "nouveau_dma.h"
-
-#include "nv50_display.h"
-#include "nouveau_crtc.h"
-#include "nouveau_encoder.h"
+#include "nouveau_gem.h"
 #include "nouveau_connector.h"
-#include "nouveau_fbcon.h"
-#include <drm/drm_crtc_helper.h>
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
 #include "nouveau_fence.h"
+#include "nv50_display.h"
 
+#include <core/client.h>
 #include <core/gpuobj.h>
+#include <core/class.h>
+
 #include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
 
-static void nv50_display_bh(unsigned long);
+#define EVO_DMA_NR 9
 
-static inline int
-nv50_sor_nr(struct drm_device *dev)
+#define EVO_MASTER  (0x00)
+#define EVO_FLIP(c) (0x01 + (c))
+#define EVO_OVLY(c) (0x05 + (c))
+#define EVO_OIMM(c) (0x09 + (c))
+#define EVO_CURS(c) (0x0d + (c))
+
+/* offsets in shared sync bo of various structures */
+#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
+#define EVO_MAST_NTFY     EVO_SYNC(  0, 0x00)
+#define EVO_FLIP_SEM0(c)  EVO_SYNC((c), 0x00)
+#define EVO_FLIP_SEM1(c)  EVO_SYNC((c), 0x10)
+
+#define EVO_CORE_HANDLE      (0xd1500000)
+#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
+#define EVO_CHAN_OCLASS(t,c) ((nv_hclass(c) & 0xff00) | ((t) & 0x00ff))
+#define EVO_PUSH_HANDLE(t,i) (0xd15b0000 | (i) |                               \
+			      (((NV50_DISP_##t##_CLASS) & 0x00ff) << 8))
+
+/******************************************************************************
+ * EVO channel
+ *****************************************************************************/
+
+struct nv50_chan {
+	struct nouveau_object *user;
+	u32 handle;
+};
+
+static int
+nv50_chan_create(struct nouveau_object *core, u32 bclass, u8 head,
+		 void *data, u32 size, struct nv50_chan *chan)
 {
-	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+	const u32 oclass = EVO_CHAN_OCLASS(bclass, core);
+	const u32 handle = EVO_CHAN_HANDLE(bclass, head);
+	int ret;
 
-	if (device->chipset  < 0x90 ||
-	    device->chipset == 0x92 ||
-	    device->chipset == 0xa0)
-		return 2;
+	ret = nouveau_object_new(client, EVO_CORE_HANDLE, handle,
+				 oclass, data, size, &chan->user);
+	if (ret)
+		return ret;
 
-	return 4;
-}
-
-u32
-nv50_display_active_crtcs(struct drm_device *dev)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 mask = 0;
-	int i;
-
-	if (device->chipset  < 0x90 ||
-	    device->chipset == 0x92 ||
-	    device->chipset == 0xa0) {
-		for (i = 0; i < 2; i++)
-			mask |= nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
-	} else {
-		for (i = 0; i < 4; i++)
-			mask |= nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
-	}
-
-	for (i = 0; i < 3; i++)
-		mask |= nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
-
-	return mask & 3;
-}
-
-int
-nv50_display_early_init(struct drm_device *dev)
-{
+	chan->handle = handle;
 	return 0;
 }
 
-void
-nv50_display_late_takedown(struct drm_device *dev)
+static void
+nv50_chan_destroy(struct nouveau_object *core, struct nv50_chan *chan)
 {
+	struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+	if (chan->handle)
+		nouveau_object_del(client, EVO_CORE_HANDLE, chan->handle);
 }
 
-int
-nv50_display_sync(struct drm_device *dev)
+/******************************************************************************
+ * PIO EVO channel
+ *****************************************************************************/
+
+struct nv50_pioc {
+	struct nv50_chan base;
+};
+
+static void
+nv50_pioc_destroy(struct nouveau_object *core, struct nv50_pioc *pioc)
 {
-	struct nv50_display *disp = nv50_display(dev);
-	struct nouveau_channel *evo = disp->master;
+	nv50_chan_destroy(core, &pioc->base);
+}
+
+static int
+nv50_pioc_create(struct nouveau_object *core, u32 bclass, u8 head,
+		 void *data, u32 size, struct nv50_pioc *pioc)
+{
+	return nv50_chan_create(core, bclass, head, data, size, &pioc->base);
+}
+
+/******************************************************************************
+ * DMA EVO channel
+ *****************************************************************************/
+
+struct nv50_dmac {
+	struct nv50_chan base;
+	dma_addr_t handle;
+	u32 *ptr;
+};
+
+static void
+nv50_dmac_destroy(struct nouveau_object *core, struct nv50_dmac *dmac)
+{
+	if (dmac->ptr) {
+		struct pci_dev *pdev = nv_device(core)->pdev;
+		pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
+	}
+
+	nv50_chan_destroy(core, &dmac->base);
+}
+
+static int
+nv50_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
+{
+	struct nouveau_fb *pfb = nouveau_fb(core);
+	struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+	struct nouveau_object *object;
+	int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+				     NV_DMA_IN_MEMORY_CLASS,
+				     &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NV50_DMA_CONF0_ENABLE |
+					         NV50_DMA_CONF0_PART_256,
+				     }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(client, parent, NvEvoFB16,
+				 NV_DMA_IN_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NV50_DMA_CONF0_ENABLE | 0x70 |
+					         NV50_DMA_CONF0_PART_256,
+				 }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(client, parent, NvEvoFB32,
+				 NV_DMA_IN_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NV50_DMA_CONF0_ENABLE | 0x7a |
+					         NV50_DMA_CONF0_PART_256,
+				 }, sizeof(struct nv_dma_class), &object);
+	return ret;
+}
+
+static int
+nvc0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
+{
+	struct nouveau_fb *pfb = nouveau_fb(core);
+	struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+	struct nouveau_object *object;
+	int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+				     NV_DMA_IN_MEMORY_CLASS,
+				     &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NVC0_DMA_CONF0_ENABLE,
+				     }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(client, parent, NvEvoFB16,
+				 NV_DMA_IN_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
+				 }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(client, parent, NvEvoFB32,
+				 NV_DMA_IN_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
+				 }, sizeof(struct nv_dma_class), &object);
+	return ret;
+}
+
+static int
+nvd0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
+{
+	struct nouveau_fb *pfb = nouveau_fb(core);
+	struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+	struct nouveau_object *object;
+	int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+				     NV_DMA_IN_MEMORY_CLASS,
+				     &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NVD0_DMA_CONF0_ENABLE |
+						 NVD0_DMA_CONF0_PAGE_LP,
+				     }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(client, parent, NvEvoFB32,
+				 NV_DMA_IN_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NVD0_DMA_CONF0_ENABLE | 0xfe |
+						 NVD0_DMA_CONF0_PAGE_LP,
+				 }, sizeof(struct nv_dma_class), &object);
+	return ret;
+}
+
+static int
+nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head,
+		 void *data, u32 size, u64 syncbuf,
+		 struct nv50_dmac *dmac)
+{
+	struct nouveau_fb *pfb = nouveau_fb(core);
+	struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+	struct nouveau_object *object;
+	u32 pushbuf = *(u32 *)data;
 	int ret;
 
-	ret = RING_SPACE(evo, 6);
-	if (ret == 0) {
-		BEGIN_NV04(evo, 0, 0x0084, 1);
-		OUT_RING  (evo, 0x80000000);
-		BEGIN_NV04(evo, 0, 0x0080, 1);
-		OUT_RING  (evo, 0);
-		BEGIN_NV04(evo, 0, 0x0084, 1);
-		OUT_RING  (evo, 0x00000000);
+	dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE,
+					&dmac->handle);
+	if (!dmac->ptr)
+		return -ENOMEM;
 
-		nv_wo32(disp->ramin, 0x2000, 0x00000000);
-		FIRE_RING (evo);
+	ret = nouveau_object_new(client, NVDRM_DEVICE, pushbuf,
+				 NV_DMA_FROM_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_PCI_US |
+						 NV_DMA_ACCESS_RD,
+					.start = dmac->handle + 0x0000,
+					.limit = dmac->handle + 0x0fff,
+				 }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
 
-		if (nv_wait_ne(disp->ramin, 0x2000, 0xffffffff, 0x00000000))
+	ret = nv50_chan_create(core, bclass, head, data, size, &dmac->base);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(client, dmac->base.handle, NvEvoSync,
+				 NV_DMA_IN_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = syncbuf + 0x0000,
+					.limit = syncbuf + 0x0fff,
+				 }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(client, dmac->base.handle, NvEvoVRAM,
+				 NV_DMA_IN_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+				 }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	if (nv_device(core)->card_type < NV_C0)
+		ret = nv50_dmac_create_fbdma(core, dmac->base.handle);
+	else
+	if (nv_device(core)->card_type < NV_D0)
+		ret = nvc0_dmac_create_fbdma(core, dmac->base.handle);
+	else
+		ret = nvd0_dmac_create_fbdma(core, dmac->base.handle);
+	return ret;
+}
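+
+/* For reference, nv50_dmac_create() leaves a channel with three views of
+ * memory (a summary read off the code above, not new API):
+ *
+ * pushbuf (NV_DMA_FROM_MEMORY): dmac->handle .. dmac->handle + 0x0fff,
+ *     read-only over PCI; the single page of coherent system memory
+ *     the engine fetches methods from (written by evo_wait/evo_kick).
+ * NvEvoSync (NV_DMA_IN_MEMORY): syncbuf .. syncbuf + 0x0fff in VRAM;
+ *     backs the notifier and page-flip semaphores (EVO_SYNC offsets).
+ * NvEvoVRAM (NV_DMA_IN_MEMORY): 0 .. pfb->ram.size - 1;
+ *     lets scanout reach any framebuffer placed in VRAM.
+ */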
+
+struct nv50_mast {
+	struct nv50_dmac base;
+};
+
+struct nv50_curs {
+	struct nv50_pioc base;
+};
+
+struct nv50_sync {
+	struct nv50_dmac base;
+	struct {
+		u32 offset;
+		u16 value;
+	} sem;
+};
+
+struct nv50_ovly {
+	struct nv50_dmac base;
+};
+
+struct nv50_oimm {
+	struct nv50_pioc base;
+};
+
+struct nv50_head {
+	struct nouveau_crtc base;
+	struct nv50_curs curs;
+	struct nv50_sync sync;
+	struct nv50_ovly ovly;
+	struct nv50_oimm oimm;
+};
+
+#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
+#define nv50_curs(c) (&nv50_head(c)->curs)
+#define nv50_sync(c) (&nv50_head(c)->sync)
+#define nv50_ovly(c) (&nv50_head(c)->ovly)
+#define nv50_oimm(c) (&nv50_head(c)->oimm)
+#define nv50_chan(c) (&(c)->base.base)
+#define nv50_vers(c) nv_mclass(nv50_chan(c)->user)
+
+struct nv50_disp {
+	struct nouveau_object *core;
+	struct nv50_mast mast;
+
+	u32 modeset;
+
+	struct nouveau_bo *sync;
+};
+
+static struct nv50_disp *
+nv50_disp(struct drm_device *dev)
+{
+	return nouveau_display(dev)->priv;
+}
+
+#define nv50_mast(d) (&nv50_disp(d)->mast)
+
+static struct drm_crtc *
+nv50_display_crtc_get(struct drm_encoder *encoder)
+{
+	return nouveau_encoder(encoder)->crtc;
+}
+
+/******************************************************************************
+ * EVO channel helpers
+ *****************************************************************************/
+static u32 *
+evo_wait(void *evoc, int nr)
+{
+	struct nv50_dmac *dmac = evoc;
+	u32 put = nv_ro32(dmac->base.user, 0x0000) / 4;
+
+	if (put + nr >= (PAGE_SIZE / 4) - 8) {
+		dmac->ptr[put] = 0x20000000;
+
+		nv_wo32(dmac->base.user, 0x0000, 0x00000000);
+		if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) {
+			NV_ERROR(dmac->base.user, "channel stalled\n");
+			return NULL;
+		}
+
+		put = 0;
+	}
+
+	return dmac->ptr + put;
+}
+
+static void
+evo_kick(u32 *push, void *evoc)
+{
+	struct nv50_dmac *dmac = evoc;
+	nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
+}
+
+#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
+#define evo_data(p,d)   *((p)++) = (d)
+
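
A minimal usage sketch of the four helpers above, mirroring the call sites later in this file; evo_mthd() packs a header word of (size << 18 | method) and evo_data() appends the payload words:

	u32 *push = evo_wait(mast, 2);		/* NULL if channel stalled */
	if (push) {
		evo_mthd(push, 0x0080, 1);	/* core UPDATE, 1 data word */
		evo_data(push, 0x00000000);
		evo_kick(push, mast);		/* publish new put pointer */
	}
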
+static bool
+evo_sync_wait(void *data)
+{
+	return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
+}
+
+static int
+evo_sync(struct drm_device *dev)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nv50_disp *disp = nv50_disp(dev);
+	struct nv50_mast *mast = nv50_mast(dev);
+	u32 *push = evo_wait(mast, 8);
+	if (push) {
+		nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
+		evo_mthd(push, 0x0084, 1);
+		evo_data(push, 0x80000000 | EVO_MAST_NTFY);
+		evo_mthd(push, 0x0080, 2);
+		evo_data(push, 0x00000000);
+		evo_data(push, 0x00000000);
+		evo_kick(push, mast);
+		if (nv_wait_cb(device, evo_sync_wait, disp->sync))
 			return 0;
 	}
 
-	return 0;
-}
-
-int
-nv50_display_init(struct drm_device *dev)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_channel *evo;
-	int ret, i;
-	u32 val;
-
-	NV_DEBUG(drm, "\n");
-
-	nv_wr32(device, 0x00610184, nv_rd32(device, 0x00614004));
-
-	/*
-	 * I think the 0x006101XX range is some kind of main control area
-	 * that enables things.
-	 */
-	/* CRTC? */
-	for (i = 0; i < 2; i++) {
-		val = nv_rd32(device, 0x00616100 + (i * 0x800));
-		nv_wr32(device, 0x00610190 + (i * 0x10), val);
-		val = nv_rd32(device, 0x00616104 + (i * 0x800));
-		nv_wr32(device, 0x00610194 + (i * 0x10), val);
-		val = nv_rd32(device, 0x00616108 + (i * 0x800));
-		nv_wr32(device, 0x00610198 + (i * 0x10), val);
-		val = nv_rd32(device, 0x0061610c + (i * 0x800));
-		nv_wr32(device, 0x0061019c + (i * 0x10), val);
-	}
-
-	/* DAC */
-	for (i = 0; i < 3; i++) {
-		val = nv_rd32(device, 0x0061a000 + (i * 0x800));
-		nv_wr32(device, 0x006101d0 + (i * 0x04), val);
-	}
-
-	/* SOR */
-	for (i = 0; i < nv50_sor_nr(dev); i++) {
-		val = nv_rd32(device, 0x0061c000 + (i * 0x800));
-		nv_wr32(device, 0x006101e0 + (i * 0x04), val);
-	}
-
-	/* EXT */
-	for (i = 0; i < 3; i++) {
-		val = nv_rd32(device, 0x0061e000 + (i * 0x800));
-		nv_wr32(device, 0x006101f0 + (i * 0x04), val);
-	}
-
-	for (i = 0; i < 3; i++) {
-		nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
-			NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-		nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
-	}
-
-	/* The precise purpose is unknown, i suspect it has something to do
-	 * with text mode.
-	 */
-	if (nv_rd32(device, NV50_PDISPLAY_INTR_1) & 0x100) {
-		nv_wr32(device, NV50_PDISPLAY_INTR_1, 0x100);
-		nv_wr32(device, 0x006194e8, nv_rd32(device, 0x006194e8) & ~1);
-		if (!nv_wait(device, 0x006194e8, 2, 0)) {
-			NV_ERROR(drm, "timeout: (0x6194e8 & 2) != 0\n");
-			NV_ERROR(drm, "0x6194e8 = 0x%08x\n",
-						nv_rd32(device, 0x6194e8));
-			return -EBUSY;
-		}
-	}
-
-	for (i = 0; i < 2; i++) {
-		nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
-		if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
-			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
-			NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
-			NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
-				 nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
-			return -EBUSY;
-		}
-
-		nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
-			NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
-		if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
-			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
-			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
-			NV_ERROR(drm, "timeout: "
-				      "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
-			NV_ERROR(drm, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
-				 nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
-			return -EBUSY;
-		}
-	}
-
-	nv_wr32(device, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
-	nv_mask(device, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
-	nv_wr32(device, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
-	nv_mask(device, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
-	nv_wr32(device, NV50_PDISPLAY_INTR_EN_1,
-		     NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
-		     NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
-		     NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
-
-	ret = nv50_evo_init(dev);
-	if (ret)
-		return ret;
-	evo = nv50_display(dev)->master;
-
-	nv_wr32(device, NV50_PDISPLAY_OBJECTS, (nv50_display(dev)->ramin->addr >> 8) | 9);
-
-	ret = RING_SPACE(evo, 3);
-	if (ret)
-		return ret;
-	BEGIN_NV04(evo, 0, NV50_EVO_UNK84, 2);
-	OUT_RING  (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
-	OUT_RING  (evo, NvEvoSync);
-
-	return nv50_display_sync(dev);
-}
-
-void
-nv50_display_fini(struct drm_device *dev)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nv50_display *disp = nv50_display(dev);
-	struct nouveau_channel *evo = disp->master;
-	struct drm_crtc *drm_crtc;
-	int ret, i;
-
-	NV_DEBUG(drm, "\n");
-
-	list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
-		struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
-
-		nv50_crtc_blank(crtc, true);
-	}
-
-	ret = RING_SPACE(evo, 2);
-	if (ret == 0) {
-		BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-		OUT_RING(evo, 0);
-	}
-	FIRE_RING(evo);
-
-	/* Almost like ack'ing a vblank interrupt, maybe in the spirit of
-	 * cleaning up?
-	 */
-	list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
-		struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
-		uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index);
-
-		if (!crtc->base.enabled)
-			continue;
-
-		nv_wr32(device, NV50_PDISPLAY_INTR_1, mask);
-		if (!nv_wait(device, NV50_PDISPLAY_INTR_1, mask, mask)) {
-			NV_ERROR(drm, "timeout: (0x610024 & 0x%08x) == "
-				      "0x%08x\n", mask, mask);
-			NV_ERROR(drm, "0x610024 = 0x%08x\n",
-				 nv_rd32(device, NV50_PDISPLAY_INTR_1));
-		}
-	}
-
-	for (i = 0; i < 2; i++) {
-		nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0);
-		if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
-			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
-			NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
-			NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
-				 nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
-		}
-	}
-
-	nv50_evo_fini(dev);
-
-	for (i = 0; i < 3; i++) {
-		if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(i),
-			     NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
-			NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
-			NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
-				  nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
-		}
-	}
-
-	/* disable interrupts. */
-	nv_wr32(device, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
-}
-
-int
-nv50_display_create(struct drm_device *dev)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct dcb_table *dcb = &drm->vbios.dcb;
-	struct drm_connector *connector, *ct;
-	struct nv50_display *priv;
-	int ret, i;
-
-	NV_DEBUG(drm, "\n");
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	nouveau_display(dev)->priv = priv;
-	nouveau_display(dev)->dtor = nv50_display_destroy;
-	nouveau_display(dev)->init = nv50_display_init;
-	nouveau_display(dev)->fini = nv50_display_fini;
-
-	/* Create CRTC objects */
-	for (i = 0; i < 2; i++) {
-		ret = nv50_crtc_create(dev, i);
-		if (ret)
-			return ret;
-	}
-
-	/* We setup the encoders from the BIOS table */
-	for (i = 0 ; i < dcb->entries; i++) {
-		struct dcb_output *entry = &dcb->entry[i];
-
-		if (entry->location != DCB_LOC_ON_CHIP) {
-			NV_WARN(drm, "Off-chip encoder %d/%d unsupported\n",
-				entry->type, ffs(entry->or) - 1);
-			continue;
-		}
-
-		connector = nouveau_connector_create(dev, entry->connector);
-		if (IS_ERR(connector))
-			continue;
-
-		switch (entry->type) {
-		case DCB_OUTPUT_TMDS:
-		case DCB_OUTPUT_LVDS:
-		case DCB_OUTPUT_DP:
-			nv50_sor_create(connector, entry);
-			break;
-		case DCB_OUTPUT_ANALOG:
-			nv50_dac_create(connector, entry);
-			break;
-		default:
-			NV_WARN(drm, "DCB encoder %d unknown\n", entry->type);
-			continue;
-		}
-	}
-
-	list_for_each_entry_safe(connector, ct,
-				 &dev->mode_config.connector_list, head) {
-		if (!connector->encoder_ids[0]) {
-			NV_WARN(drm, "%s has no encoders, removing\n",
-				drm_get_connector_name(connector));
-			connector->funcs->destroy(connector);
-		}
-	}
-
-	tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
-
-	ret = nv50_evo_create(dev);
-	if (ret) {
-		nv50_display_destroy(dev);
-		return ret;
-	}
-
-	return 0;
-}
-
-void
-nv50_display_destroy(struct drm_device *dev)
-{
-	struct nv50_display *disp = nv50_display(dev);
-
-	nv50_evo_destroy(dev);
-	kfree(disp);
+	return -EBUSY;
 }
 
+/******************************************************************************
+ * Page flipping channel
+ *****************************************************************************/
 struct nouveau_bo *
 nv50_display_crtc_sema(struct drm_device *dev, int crtc)
 {
-	return nv50_display(dev)->crtc[crtc].sem.bo;
+	return nv50_disp(dev)->sync;
 }
 
 void
 nv50_display_flip_stop(struct drm_crtc *crtc)
 {
-	struct nv50_display *disp = nv50_display(crtc->dev);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
-	struct nouveau_channel *evo = dispc->sync;
-	int ret;
+	struct nv50_sync *sync = nv50_sync(crtc);
+	u32 *push;
 
-	ret = RING_SPACE(evo, 8);
-	if (ret) {
-		WARN_ON(1);
-		return;
+	push = evo_wait(sync, 8);
+	if (push) {
+		evo_mthd(push, 0x0084, 1);
+		evo_data(push, 0x00000000);
+		evo_mthd(push, 0x0094, 1);
+		evo_data(push, 0x00000000);
+		evo_mthd(push, 0x00c0, 1);
+		evo_data(push, 0x00000000);
+		evo_mthd(push, 0x0080, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, sync);
 	}
-
-	BEGIN_NV04(evo, 0, 0x0084, 1);
-	OUT_RING  (evo, 0x00000000);
-	BEGIN_NV04(evo, 0, 0x0094, 1);
-	OUT_RING  (evo, 0x00000000);
-	BEGIN_NV04(evo, 0, 0x00c0, 1);
-	OUT_RING  (evo, 0x00000000);
-	BEGIN_NV04(evo, 0, 0x0080, 1);
-	OUT_RING  (evo, 0x00000000);
-	FIRE_RING (evo);
 }
 
 int
 nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-		       struct nouveau_channel *chan)
+		       struct nouveau_channel *chan, u32 swap_interval)
 {
-	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
 	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
-	struct nv50_display *disp = nv50_display(crtc->dev);
+	struct nv50_disp *disp = nv50_disp(crtc->dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
-	struct nouveau_channel *evo = dispc->sync;
+	struct nv50_sync *sync = nv50_sync(crtc);
+	u32 *push;
 	int ret;
 
-	ret = RING_SPACE(evo, chan ? 25 : 27);
-	if (unlikely(ret))
-		return ret;
+	swap_interval <<= 4;
+	if (swap_interval == 0)
+		swap_interval |= 0x100;
+
+	push = evo_wait(sync, 128);
+	if (unlikely(push == NULL))
+		return -EBUSY;
 
 	/* synchronise with the rendering channel, if necessary */
 	if (likely(chan)) {
 		ret = RING_SPACE(chan, 10);
-		if (ret) {
-			WIND_RING(evo);
+		if (ret)
 			return ret;
-		}
 
-		if (nv_device(drm->device)->chipset < 0xc0) {
-			BEGIN_NV04(chan, 0, 0x0060, 2);
+		if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+			BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
 			OUT_RING  (chan, NvEvoSema0 + nv_crtc->index);
-			OUT_RING  (chan, dispc->sem.offset);
-			BEGIN_NV04(chan, 0, 0x006c, 1);
-			OUT_RING  (chan, 0xf00d0000 | dispc->sem.value);
-			BEGIN_NV04(chan, 0, 0x0064, 2);
-			OUT_RING  (chan, dispc->sem.offset ^ 0x10);
+			OUT_RING  (chan, sync->sem.offset);
+			BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
+			OUT_RING  (chan, 0xf00d0000 | sync->sem.value);
+			BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
+			OUT_RING  (chan, sync->sem.offset ^ 0x10);
 			OUT_RING  (chan, 0x74b1e000);
-			BEGIN_NV04(chan, 0, 0x0060, 1);
-			if (nv_device(drm->device)->chipset < 0x84)
+			BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+			if (nv_mclass(chan->object) < NV84_CHANNEL_DMA_CLASS)
 				OUT_RING  (chan, NvSema);
 			else
 				OUT_RING  (chan, chan->vram);
 		} else {
 			u64 offset = nvc0_fence_crtc(chan, nv_crtc->index);
-			offset += dispc->sem.offset;
-			BEGIN_NVC0(chan, 0, 0x0010, 4);
+			offset += sync->sem.offset;
+
+			BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
 			OUT_RING  (chan, upper_32_bits(offset));
 			OUT_RING  (chan, lower_32_bits(offset));
-			OUT_RING  (chan, 0xf00d0000 | dispc->sem.value);
+			OUT_RING  (chan, 0xf00d0000 | sync->sem.value);
 			OUT_RING  (chan, 0x1002);
-			BEGIN_NVC0(chan, 0, 0x0010, 4);
+			BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
 			OUT_RING  (chan, upper_32_bits(offset));
 			OUT_RING  (chan, lower_32_bits(offset ^ 0x10));
 			OUT_RING  (chan, 0x74b1e000);
 			OUT_RING  (chan, 0x1001);
 		}
+
 		FIRE_RING (chan);
 	} else {
-		nouveau_bo_wr32(dispc->sem.bo, dispc->sem.offset / 4,
-				0xf00d0000 | dispc->sem.value);
+		nouveau_bo_wr32(disp->sync, sync->sem.offset / 4,
+				0xf00d0000 | sync->sem.value);
+		evo_sync(crtc->dev);
 	}
 
-	/* queue the flip on the crtc's "display sync" channel */
-	BEGIN_NV04(evo, 0, 0x0100, 1);
-	OUT_RING  (evo, 0xfffe0000);
-	if (chan) {
-		BEGIN_NV04(evo, 0, 0x0084, 1);
-		OUT_RING  (evo, 0x00000100);
+	/* queue the flip */
+	evo_mthd(push, 0x0100, 1);
+	evo_data(push, 0xfffe0000);
+	evo_mthd(push, 0x0084, 1);
+	evo_data(push, swap_interval);
+	if (!(swap_interval & 0x00000100)) {
+		evo_mthd(push, 0x00e0, 1);
+		evo_data(push, 0x40000000);
+	}
+	evo_mthd(push, 0x0088, 4);
+	evo_data(push, sync->sem.offset);
+	evo_data(push, 0xf00d0000 | sync->sem.value);
+	evo_data(push, 0x74b1e000);
+	evo_data(push, NvEvoSync);
+	evo_mthd(push, 0x00a0, 2);
+	evo_data(push, 0x00000000);
+	evo_data(push, 0x00000000);
+	evo_mthd(push, 0x00c0, 1);
+	evo_data(push, nv_fb->r_dma);
+	evo_mthd(push, 0x0110, 2);
+	evo_data(push, 0x00000000);
+	evo_data(push, 0x00000000);
+	if (nv50_vers(sync) < NVD0_DISP_SYNC_CLASS) {
+		evo_mthd(push, 0x0800, 5);
+		evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+		evo_data(push, 0);
+		evo_data(push, (fb->height << 16) | fb->width);
+		evo_data(push, nv_fb->r_pitch);
+		evo_data(push, nv_fb->r_format);
 	} else {
-		BEGIN_NV04(evo, 0, 0x0084, 1);
-		OUT_RING  (evo, 0x00000010);
-		/* allows gamma somehow, PDISP will bitch at you if
-		 * you don't wait for vblank before changing this..
-		 */
-		BEGIN_NV04(evo, 0, 0x00e0, 1);
-		OUT_RING  (evo, 0x40000000);
+		evo_mthd(push, 0x0400, 5);
+		evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+		evo_data(push, 0);
+		evo_data(push, (fb->height << 16) | fb->width);
+		evo_data(push, nv_fb->r_pitch);
+		evo_data(push, nv_fb->r_format);
 	}
-	BEGIN_NV04(evo, 0, 0x0088, 4);
-	OUT_RING  (evo, dispc->sem.offset);
-	OUT_RING  (evo, 0xf00d0000 | dispc->sem.value);
-	OUT_RING  (evo, 0x74b1e000);
-	OUT_RING  (evo, NvEvoSync);
-	BEGIN_NV04(evo, 0, 0x00a0, 2);
-	OUT_RING  (evo, 0x00000000);
-	OUT_RING  (evo, 0x00000000);
-	BEGIN_NV04(evo, 0, 0x00c0, 1);
-	OUT_RING  (evo, nv_fb->r_dma);
-	BEGIN_NV04(evo, 0, 0x0110, 2);
-	OUT_RING  (evo, 0x00000000);
-	OUT_RING  (evo, 0x00000000);
-	BEGIN_NV04(evo, 0, 0x0800, 5);
-	OUT_RING  (evo, nv_fb->nvbo->bo.offset >> 8);
-	OUT_RING  (evo, 0);
-	OUT_RING  (evo, (fb->height << 16) | fb->width);
-	OUT_RING  (evo, nv_fb->r_pitch);
-	OUT_RING  (evo, nv_fb->r_format);
-	BEGIN_NV04(evo, 0, 0x0080, 1);
-	OUT_RING  (evo, 0x00000000);
-	FIRE_RING (evo);
+	evo_mthd(push, 0x0080, 1);
+	evo_data(push, 0x00000000);
+	evo_kick(push, sync);
 
-	dispc->sem.offset ^= 0x10;
-	dispc->sem.value++;
+	sync->sem.offset ^= 0x10;
+	sync->sem.value++;
 	return 0;
 }
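
To make the handshake above easier to follow: each head owns two 16-byte semaphore slots in the shared sync buffer (EVO_FLIP_SEM0/SEM1), and sem.offset/sem.value select the slot and sequence number for the next flip. Reading the code, the protocol for flip N appears to be:

/*
 *   render channel:  release 0xf00d0000|N at sem.offset, then
 *                    acquire 0x74b1e000   at sem.offset ^ 0x10
 *   sync channel:    waits for 0xf00d0000|N at sem.offset before
 *                    flipping, writes 0x74b1e000 to sem.offset ^ 0x10
 *                    once the new framebuffer is being scanned out
 *
 * The closing "sem.offset ^= 0x10; sem.value++" swaps the two slots'
 * roles for flip N+1, letting rendering run one frame ahead.
 */
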
 
-static u16
-nv50_display_script_select(struct drm_device *dev, struct dcb_output *dcb,
-			   u32 mc, int pxclk)
+/******************************************************************************
+ * CRTC
+ *****************************************************************************/
+static int
+nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
 {
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_connector *nv_connector = NULL;
-	struct drm_encoder *encoder;
-	struct nvbios *bios = &drm->vbios;
-	u32 script = 0, or;
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	struct nouveau_connector *nv_connector;
+	struct drm_connector *connector;
+	u32 *push, mode = 0x00;
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-
-		if (nv_encoder->dcb != dcb)
-			continue;
-
-		nv_connector = nouveau_encoder_connector_get(nv_encoder);
-		break;
+	nv_connector = nouveau_crtc_connector_get(nv_crtc);
+	connector = &nv_connector->base;
+	if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
+		if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+			mode = DITHERING_MODE_DYNAMIC2X2;
+	} else {
+		mode = nv_connector->dithering_mode;
 	}
 
-	or = ffs(dcb->or) - 1;
-	switch (dcb->type) {
-	case DCB_OUTPUT_LVDS:
-		script = (mc >> 8) & 0xf;
-		if (bios->fp_no_ddc) {
-			if (bios->fp.dual_link)
-				script |= 0x0100;
-			if (bios->fp.if_is_24bit)
-				script |= 0x0200;
+	if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
+		if (connector->display_info.bpc >= 8)
+			mode |= DITHERING_DEPTH_8BPC;
+	} else {
+		mode |= nv_connector->dithering_depth;
+	}
+
+	push = evo_wait(mast, 4);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1);
+			evo_data(push, mode);
+		} else
+		if (nv50_vers(mast) < NVE0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1);
+			evo_data(push, mode);
 		} else {
-			/* determine number of lvds links */
-			if (nv_connector && nv_connector->edid &&
-			    nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
-				/* http://www.spwg.org */
-				if (((u8 *)nv_connector->edid)[121] == 2)
-					script |= 0x0100;
-			} else
-			if (pxclk >= bios->fp.duallink_transition_clk) {
-				script |= 0x0100;
-			}
-
-			/* determine panel depth */
-			if (script & 0x0100) {
-				if (bios->fp.strapless_is_24bit & 2)
-					script |= 0x0200;
-			} else {
-				if (bios->fp.strapless_is_24bit & 1)
-					script |= 0x0200;
-			}
-
-			if (nv_connector && nv_connector->edid &&
-			    (nv_connector->edid->revision >= 4) &&
-			    (nv_connector->edid->input & 0x70) >= 0x20)
-				script |= 0x0200;
+			evo_mthd(push, 0x04a0 + (nv_crtc->index * 0x0300), 1);
+			evo_data(push, mode);
+		}
+
+		if (update) {
+			evo_mthd(push, 0x0080, 1);
+			evo_data(push, 0x00000000);
+		}
+		evo_kick(push, mast);
+	}
+
+	return 0;
+}
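
In short, the control word pushed above combines a dither pattern with a target depth, both AUTO properties being resolved against the framebuffer depth and the sink's reported bpc (a restatement of the logic above, constants from the connector code):

	if (dithering_mode == DITHERING_MODE_AUTO)
		mode = (fb_depth > bpc * 3) ? DITHERING_MODE_DYNAMIC2X2 : 0;
	if (dithering_depth == DITHERING_DEPTH_AUTO)
		mode |= (bpc >= 8) ? DITHERING_DEPTH_8BPC : 0;
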
+
+static int
+nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
+{
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
+	struct drm_crtc *crtc = &nv_crtc->base;
+	struct nouveau_connector *nv_connector;
+	int mode = DRM_MODE_SCALE_NONE;
+	u32 oX, oY, *push;
+
+	/* start off at the resolution we programmed the crtc for; this
+	 * effectively handles NONE/FULL scaling
+	 */
+	nv_connector = nouveau_crtc_connector_get(nv_crtc);
+	if (nv_connector && nv_connector->native_mode)
+		mode = nv_connector->scaling_mode;
+
+	if (mode != DRM_MODE_SCALE_NONE)
+		omode = nv_connector->native_mode;
+	else
+		omode = umode;
+
+	oX = omode->hdisplay;
+	oY = omode->vdisplay;
+	if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+		oY *= 2;
+
+	/* add overscan compensation if necessary; this will keep the
+	 * aspect ratio the same as the backend mode unless the user
+	 * overrides it by setting both hborder and vborder properties.
+	 */
+	if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
+			     (nv_connector->underscan == UNDERSCAN_AUTO &&
+			      nv_connector->edid &&
+			      drm_detect_hdmi_monitor(nv_connector->edid)))) {
+		u32 bX = nv_connector->underscan_hborder;
+		u32 bY = nv_connector->underscan_vborder;
+		u32 aspect = (oY << 19) / oX;
+
+		if (bX) {
+			oX -= (bX * 2);
+			if (bY) oY -= (bY * 2);
+			else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
+		} else {
+			oX -= (oX >> 4) + 32;
+			if (bY) oY -= (bY * 2);
+			else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
+		}
+	}
+
+	/* handle CENTER/ASPECT scaling, taking into account the areas
+	 * removed already for overscan compensation
+	 */
+	switch (mode) {
+	case DRM_MODE_SCALE_CENTER:
+		oX = min((u32)umode->hdisplay, oX);
+		oY = min((u32)umode->vdisplay, oY);
+		/* fall-through */
+	case DRM_MODE_SCALE_ASPECT:
+		if (oY < oX) {
+			u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
+			oX = ((oY * aspect) + (aspect / 2)) >> 19;
+		} else {
+			u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
+			oY = ((oX * aspect) + (aspect / 2)) >> 19;
 		}
-		break;
-	case DCB_OUTPUT_TMDS:
-		script = (mc >> 8) & 0xf;
-		if (pxclk >= 165000)
-			script |= 0x0100;
-		break;
-	case DCB_OUTPUT_DP:
-		script = (mc >> 8) & 0xf;
-		break;
-	case DCB_OUTPUT_ANALOG:
-		script = 0xff;
 		break;
 	default:
-		NV_ERROR(drm, "modeset on unsupported output type!\n");
 		break;
 	}
 
-	return script;
+	push = evo_wait(mast, 8);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			/*XXX: SCALE_CTRL_ACTIVE??? */
+			evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, (oY << 16) | oX);
+			evo_data(push, (oY << 16) | oX);
+			evo_mthd(push, 0x08a4 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x08c8 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+		} else {
+			evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
+			evo_data(push, (oY << 16) | oX);
+			evo_data(push, (oY << 16) | oX);
+			evo_data(push, (oY << 16) | oX);
+			evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+		}
+
+		evo_kick(push, mast);
+
+		if (update) {
+			nv50_display_flip_stop(crtc);
+			nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+		}
+	}
+
+	return 0;
+}
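
The scaler math above keeps the aspect ratio in unsigned .19 fixed point, with the "+ aspect / 2" term rounding to nearest before the shift. A worked example with assumed numbers:

	/* fit a 1920x1200 aspect into an output 1680 pixels wide */
	u32 oX = 1680;
	u32 aspect = (1200u << 19) / 1920;		/* 0.625 -> 327680 */
	u32 oY = ((oX * aspect) + (aspect / 2)) >> 19;	/* rounds to 1050 */
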
+
+static int
+nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
+{
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	u32 *push, hue, vib;
+	int adj;
+
+	adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
+	vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
+	hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
+
+	push = evo_wait(mast, 16);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, (hue << 20) | (vib << 8));
+		} else {
+			evo_mthd(push, 0x0498 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, (hue << 20) | (vib << 8));
+		}
+
+		if (update) {
+			evo_mthd(push, 0x0080, 1);
+			evo_data(push, 0x00000000);
+		}
+		evo_kick(push, mast);
+	}
+
+	return 0;
+}
+
+static int
+nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
+		    int x, int y, bool update)
+{
+	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	u32 *push;
+
+	push = evo_wait(mast, 16);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, nvfb->nvbo->bo.offset >> 8);
+			evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3);
+			evo_data(push, (fb->height << 16) | fb->width);
+			evo_data(push, nvfb->r_pitch);
+			evo_data(push, nvfb->r_format);
+			evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, (y << 16) | x);
+			if (nv50_vers(mast) > NV50_DISP_MAST_CLASS) {
+				evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+				evo_data(push, nvfb->r_dma);
+			}
+		} else {
+			evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, nvfb->nvbo->bo.offset >> 8);
+			evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
+			evo_data(push, (fb->height << 16) | fb->width);
+			evo_data(push, nvfb->r_pitch);
+			evo_data(push, nvfb->r_format);
+			evo_data(push, nvfb->r_dma);
+			evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, (y << 16) | x);
+		}
+
+		if (update) {
+			evo_mthd(push, 0x0080, 1);
+			evo_data(push, 0x00000000);
+		}
+		evo_kick(push, mast);
+	}
+
+	nv_crtc->fb.tile_flags = nvfb->r_dma;
+	return 0;
 }
 
 static void
-nv50_display_unk10_handler(struct drm_device *dev)
+nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
 {
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nv50_display *disp = nv50_display(dev);
-	u32 unk30 = nv_rd32(device, 0x610030), mc;
-	int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
-
-	NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
-	disp->irq.dcb = NULL;
-
-	nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) & ~8);
-
-	/* Determine which CRTC we're dealing with, only 1 ever will be
-	 * signalled at the same time with the current nouveau code.
-	 */
-	crtc = ffs((unk30 & 0x00000060) >> 5) - 1;
-	if (crtc < 0)
-		goto ack;
-
-	/* Nothing needs to be done for the encoder */
-	crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
-	if (crtc < 0)
-		goto ack;
-
-	/* Find which encoder was connected to the CRTC */
-	for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
-		mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
-		NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
-		if (!(mc & (1 << crtc)))
-			continue;
-
-		switch ((mc & 0x00000f00) >> 8) {
-		case 0: type = DCB_OUTPUT_ANALOG; break;
-		case 1: type = DCB_OUTPUT_TV; break;
-		default:
-			NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
-			goto ack;
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	u32 *push = evo_wait(mast, 16);
+	if (push) {
+		if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, 0x85000000);
+			evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+		} else
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, 0x85000000);
+			evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+			evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+			evo_data(push, NvEvoVRAM);
+		} else {
+			evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
+			evo_data(push, 0x85000000);
+			evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+			evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+			evo_data(push, NvEvoVRAM);
 		}
-
-		or = i;
+		evo_kick(push, mast);
 	}
-
-	for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
-		if (nv_device(drm->device)->chipset  < 0x90 ||
-		    nv_device(drm->device)->chipset == 0x92 ||
-		    nv_device(drm->device)->chipset == 0xa0)
-			mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
-		else
-			mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
-
-		NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
-		if (!(mc & (1 << crtc)))
-			continue;
-
-		switch ((mc & 0x00000f00) >> 8) {
-		case 0: type = DCB_OUTPUT_LVDS; break;
-		case 1: type = DCB_OUTPUT_TMDS; break;
-		case 2: type = DCB_OUTPUT_TMDS; break;
-		case 5: type = DCB_OUTPUT_TMDS; break;
-		case 8: type = DCB_OUTPUT_DP; break;
-		case 9: type = DCB_OUTPUT_DP; break;
-		default:
-			NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
-			goto ack;
-		}
-
-		or = i;
-	}
-
-	/* There was no encoder to disable */
-	if (type == DCB_OUTPUT_ANY)
-		goto ack;
-
-	/* Disable the encoder */
-	for (i = 0; i < drm->vbios.dcb.entries; i++) {
-		struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
-
-		if (dcb->type == type && (dcb->or & (1 << or))) {
-			nouveau_bios_run_display_table(dev, 0, -1, dcb, -1);
-			disp->irq.dcb = dcb;
-			goto ack;
-		}
-	}
-
-	NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
-ack:
-	nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
-	nv_wr32(device, 0x610030, 0x80000000);
 }
 
 static void
-nv50_display_unk20_handler(struct drm_device *dev)
+nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc)
 {
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nv50_display *disp = nv50_display(dev);
-	u32 unk30 = nv_rd32(device, 0x610030), tmp, pclk, script, mc = 0;
-	struct dcb_output *dcb;
-	int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
-
-	NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
-	dcb = disp->irq.dcb;
-	if (dcb) {
-		nouveau_bios_run_display_table(dev, 0, -2, dcb, -1);
-		disp->irq.dcb = NULL;
-	}
-
-	/* CRTC clock change requested? */
-	crtc = ffs((unk30 & 0x00000600) >> 9) - 1;
-	if (crtc >= 0) {
-		pclk  = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
-		pclk &= 0x003fffff;
-		if (pclk)
-			nv50_crtc_set_clock(dev, crtc, pclk);
-
-		tmp = nv_rd32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
-		tmp &= ~0x000000f;
-		nv_wr32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp);
-	}
-
-	/* Nothing needs to be done for the encoder */
-	crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
-	if (crtc < 0)
-		goto ack;
-	pclk  = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff;
-
-	/* Find which encoder is connected to the CRTC */
-	for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
-		mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_P(i));
-		NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
-		if (!(mc & (1 << crtc)))
-			continue;
-
-		switch ((mc & 0x00000f00) >> 8) {
-		case 0: type = DCB_OUTPUT_ANALOG; break;
-		case 1: type = DCB_OUTPUT_TV; break;
-		default:
-			NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
-			goto ack;
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	u32 *push = evo_wait(mast, 16);
+	if (push) {
+		if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x05000000);
+		} else
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x05000000);
+			evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x00000000);
+		} else {
+			evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x05000000);
+			evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x00000000);
 		}
-
-		or = i;
+		evo_kick(push, mast);
 	}
-
-	for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
-		if (nv_device(drm->device)->chipset  < 0x90 ||
-		    nv_device(drm->device)->chipset == 0x92 ||
-		    nv_device(drm->device)->chipset == 0xa0)
-			mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_P(i));
-		else
-			mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_P(i));
-
-		NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
-		if (!(mc & (1 << crtc)))
-			continue;
-
-		switch ((mc & 0x00000f00) >> 8) {
-		case 0: type = DCB_OUTPUT_LVDS; break;
-		case 1: type = DCB_OUTPUT_TMDS; break;
-		case 2: type = DCB_OUTPUT_TMDS; break;
-		case 5: type = DCB_OUTPUT_TMDS; break;
-		case 8: type = DCB_OUTPUT_DP; break;
-		case 9: type = DCB_OUTPUT_DP; break;
-		default:
-			NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
-			goto ack;
-		}
-
-		or = i;
-	}
-
-	if (type == DCB_OUTPUT_ANY)
-		goto ack;
-
-	/* Enable the encoder */
-	for (i = 0; i < drm->vbios.dcb.entries; i++) {
-		dcb = &drm->vbios.dcb.entry[i];
-		if (dcb->type == type && (dcb->or & (1 << or)))
-			break;
-	}
-
-	if (i == drm->vbios.dcb.entries) {
-		NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
-		goto ack;
-	}
-
-	script = nv50_display_script_select(dev, dcb, mc, pclk);
-	nouveau_bios_run_display_table(dev, script, pclk, dcb, -1);
-
-	if (type == DCB_OUTPUT_DP) {
-		int link = !(dcb->dpconf.sor.link & 1);
-		if ((mc & 0x000f0000) == 0x00020000)
-			nv50_sor_dp_calc_tu(dev, or, link, pclk, 18);
-		else
-			nv50_sor_dp_calc_tu(dev, or, link, pclk, 24);
-	}
-
-	if (dcb->type != DCB_OUTPUT_ANALOG) {
-		tmp = nv_rd32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
-		tmp &= ~0x00000f0f;
-		if (script & 0x0100)
-			tmp |= 0x00000101;
-		nv_wr32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
-	} else {
-		nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
-	}
-
-	disp->irq.dcb = dcb;
-	disp->irq.pclk = pclk;
-	disp->irq.script = script;
-
-ack:
-	nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
-	nv_wr32(device, 0x610030, 0x80000000);
 }
 
-/* If programming a TMDS output on a SOR that can also be configured for
- * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
- *
- * It looks like the VBIOS TMDS scripts make an attempt at this, however,
- * the VBIOS scripts on at least one board I have only switch it off on
- * link 0, causing a blank display if the output has previously been
- * programmed for DisplayPort.
- */
 static void
-nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_output *dcb)
+nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
 {
-	struct nouveau_device *device = nouveau_dev(dev);
-	int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+
+	if (show)
+		nv50_crtc_cursor_show(nv_crtc);
+	else
+		nv50_crtc_cursor_hide(nv_crtc);
+
+	if (update) {
+		u32 *push = evo_wait(mast, 2);
+		if (push) {
+			evo_mthd(push, 0x0080, 1);
+			evo_data(push, 0x00000000);
+			evo_kick(push, mast);
+		}
+	}
+}
+
+static void
+nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static void
+nv50_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv50_mast *mast = nv50_mast(crtc->dev);
+	u32 *push;
+
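+	/* halt page flipping and blank the head, detaching its framebuffer
+	 * ctxdma and hiding the cursor until commit() restores them */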
+	nv50_display_flip_stop(crtc);
+
+	push = evo_wait(mast, 2);
+	if (push) {
+		if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x40000000);
+		} else
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x40000000);
+			evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x00000000);
+		} else {
+			evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x03000000);
+			evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x00000000);
+		}
+
+		evo_kick(push, mast);
+	}
+
+	nv50_crtc_cursor_show_hide(nv_crtc, false, false);
+}
+
+static void
+nv50_crtc_commit(struct drm_crtc *crtc)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv50_mast *mast = nv50_mast(crtc->dev);
+	u32 *push;
+
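+	/* undo what prepare() did: rebind the framebuffer ctxdma and
+	 * re-enable the head with its LUT before restoring cursor and
+	 * flip state */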
+	push = evo_wait(mast, 32);
+	if (push) {
+		if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, NvEvoVRAM_LP);
+			evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, 0xc0000000);
+			evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+		} else
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, nv_crtc->fb.tile_flags);
+			evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, 0xc0000000);
+			evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+			evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+			evo_data(push, NvEvoVRAM);
+		} else {
+			evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, nv_crtc->fb.tile_flags);
+			evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
+			evo_data(push, 0x83000000);
+			evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+			evo_data(push, 0x00000000);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+			evo_data(push, NvEvoVRAM);
+			evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0xffffff00);
+		}
+
+		evo_kick(push, mast);
+	}
+
+	nv50_crtc_cursor_show_hide(nv_crtc, nv_crtc->cursor.visible, true);
+	nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+}
+
+static bool
+nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
+		     struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static int
+nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+{
+	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+	int ret;
+
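+	/* the new scanout buffer must stay pinned in VRAM while it is
+	 * displayed; the pin on the buffer it replaces can be dropped */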
+	ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+	if (ret)
+		return ret;
+
+	if (old_fb) {
+		nvfb = nouveau_framebuffer(old_fb);
+		nouveau_bo_unpin(nvfb->nvbo);
+	}
+
+	return 0;
+}
+
+static int
+nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
+		   struct drm_display_mode *mode, int x, int y,
+		   struct drm_framebuffer *old_fb)
+{
+	struct nv50_mast *mast = nv50_mast(crtc->dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nouveau_connector *nv_connector;
+	u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+	u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+	u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
+	u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
+	u32 vblan2e = 0, vblan2s = 1;
+	u32 *push;
+	int ret;
+
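+	/* derive the sync width and blanking start/end values the core
+	 * channel expects from the DRM mode timings; interlaced modes get
+	 * a second set of blanking values for the other field */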
+	hactive = mode->htotal;
+	hsynce  = mode->hsync_end - mode->hsync_start - 1;
+	hbackp  = mode->htotal - mode->hsync_end;
+	hblanke = hsynce + hbackp;
+	hfrontp = mode->hsync_start - mode->hdisplay;
+	hblanks = mode->htotal - hfrontp - 1;
+
+	vactive = mode->vtotal * vscan / ilace;
+	vsynce  = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+	vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+	vblanke = vsynce + vbackp;
+	vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+	vblanks = vactive - vfrontp - 1;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		vblan2e = vactive + vsynce + vbackp;
+		vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
+		vactive = (vactive * 2) + 1;
+	}
+
+	ret = nv50_crtc_swap_fbs(crtc, old_fb);
+	if (ret)
+		return ret;
+
+	push = evo_wait(mast, 64);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, 0x00800000 | mode->clock);
+			evo_data(push, (ilace == 2) ? 2 : 0);
+			evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6);
+			evo_data(push, 0x00000000);
+			evo_data(push, (vactive << 16) | hactive);
+			evo_data(push, ( vsynce << 16) | hsynce);
+			evo_data(push, (vblanke << 16) | hblanke);
+			evo_data(push, (vblanks << 16) | hblanks);
+			evo_data(push, (vblan2e << 16) | vblan2s);
+			evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, 0x00000311);
+			evo_data(push, 0x00000100);
+		} else {
+			evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
+			evo_data(push, 0x00000000);
+			evo_data(push, (vactive << 16) | hactive);
+			evo_data(push, ( vsynce << 16) | hsynce);
+			evo_data(push, (vblanke << 16) | hblanke);
+			evo_data(push, (vblanks << 16) | hblanks);
+			evo_data(push, (vblan2e << 16) | vblan2s);
+			evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x00000000); /* ??? */
+			evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
+			evo_data(push, mode->clock * 1000);
+			evo_data(push, 0x00200000); /* ??? */
+			evo_data(push, mode->clock * 1000);
+			evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
+			evo_data(push, 0x00000311);
+			evo_data(push, 0x00000100);
+		}
+
+		evo_kick(push, mast);
+	}
+
+	nv_connector = nouveau_crtc_connector_get(nv_crtc);
+	nv50_crtc_set_dither(nv_crtc, false);
+	nv50_crtc_set_scale(nv_crtc, false);
+	nv50_crtc_set_color_vibrance(nv_crtc, false);
+	nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
+	return 0;
+}
+
+static int
+nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+			struct drm_framebuffer *old_fb)
+{
+	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	int ret;
+
+	if (!crtc->fb) {
+		NV_DEBUG(drm, "No FB bound\n");
+		return 0;
+	}
+
+	ret = nv50_crtc_swap_fbs(crtc, old_fb);
+	if (ret)
+		return ret;
+
+	nv50_display_flip_stop(crtc);
+	nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
+	nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+	return 0;
+}
+
+static int
+nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
+			       struct drm_framebuffer *fb, int x, int y,
+			       enum mode_set_atomic state)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	nv50_display_flip_stop(crtc);
+	nv50_crtc_set_image(nv_crtc, fb, x, y, true);
+	return 0;
+}
+
+static void
+nv50_crtc_lut_load(struct drm_crtc *crtc)
+{
+	struct nv50_disp *disp = nv50_disp(crtc->dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
+	int i;
+
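+	/* lut entries are 14-bit; pre-NVD0 packs them at an 8-byte stride,
+	 * NVD0 uses a 32-byte stride and biases each component by 0x6000 */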
+	for (i = 0; i < 256; i++) {
+		u16 r = nv_crtc->lut.r[i] >> 2;
+		u16 g = nv_crtc->lut.g[i] >> 2;
+		u16 b = nv_crtc->lut.b[i] >> 2;
+
+		if (nv_mclass(disp->core) < NVD0_DISP_CLASS) {
+			writew(r + 0x0000, lut + (i * 0x08) + 0);
+			writew(g + 0x0000, lut + (i * 0x08) + 2);
+			writew(b + 0x0000, lut + (i * 0x08) + 4);
+		} else {
+			writew(r + 0x6000, lut + (i * 0x20) + 0);
+			writew(g + 0x6000, lut + (i * 0x20) + 2);
+			writew(b + 0x6000, lut + (i * 0x20) + 4);
+		}
+	}
+}
+
+static int
+nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+		     uint32_t handle, uint32_t width, uint32_t height)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_gem_object *gem;
+	struct nouveau_bo *nvbo;
+	bool visible = (handle != 0);
+	int i, ret = 0;
+
+	if (visible) {
+		if (width != 64 || height != 64)
+			return -EINVAL;
+
+		gem = drm_gem_object_lookup(dev, file_priv, handle);
+		if (unlikely(!gem))
+			return -ENOENT;
+		nvbo = nouveau_gem_object(gem);
+
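+		/* copy the new image into this head's private cursor buffer,
+		 * one 32-bit pixel at a time */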
+		ret = nouveau_bo_map(nvbo);
+		if (ret == 0) {
+			for (i = 0; i < 64 * 64; i++) {
+				u32 v = nouveau_bo_rd32(nvbo, i);
+				nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
+			}
+			nouveau_bo_unmap(nvbo);
+		}
+
+		drm_gem_object_unreference_unlocked(gem);
+	}
+
+	if (visible != nv_crtc->cursor.visible) {
+		nv50_crtc_cursor_show_hide(nv_crtc, visible, true);
+		nv_crtc->cursor.visible = visible;
+	}
+
+	return ret;
+}
+
+static int
+nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+	struct nv50_curs *curs = nv50_curs(crtc);
+	struct nv50_chan *chan = nv50_chan(curs);
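+	/* poke the position method directly through the PIO channel's user
+	 * area, then submit an update request */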
+	nv_wo32(chan->user, 0x0084, (y << 16) | (x & 0xffff));
+	nv_wo32(chan->user, 0x0080, 0x00000000);
+	return 0;
+}
+
+static void
+nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+		    uint32_t start, uint32_t size)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	u32 end = min_t(u32, start + size, 256);
+	u32 i;
+
+	for (i = start; i < end; i++) {
+		nv_crtc->lut.r[i] = r[i];
+		nv_crtc->lut.g[i] = g[i];
+		nv_crtc->lut.b[i] = b[i];
+	}
+
+	nv50_crtc_lut_load(crtc);
+}
+
+static void
+nv50_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv50_disp *disp = nv50_disp(crtc->dev);
+	struct nv50_head *head = nv50_head(crtc);
+	nv50_dmac_destroy(disp->core, &head->ovly.base);
+	nv50_pioc_destroy(disp->core, &head->oimm.base);
+	nv50_dmac_destroy(disp->core, &head->sync.base);
+	nv50_pioc_destroy(disp->core, &head->curs.base);
+	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+	if (nv_crtc->cursor.nvbo)
+		nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+	nouveau_bo_unmap(nv_crtc->lut.nvbo);
+	if (nv_crtc->lut.nvbo)
+		nouveau_bo_unpin(nv_crtc->lut.nvbo);
+	nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+	drm_crtc_cleanup(crtc);
+	kfree(crtc);
+}
+
+static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
+	.dpms = nv50_crtc_dpms,
+	.prepare = nv50_crtc_prepare,
+	.commit = nv50_crtc_commit,
+	.mode_fixup = nv50_crtc_mode_fixup,
+	.mode_set = nv50_crtc_mode_set,
+	.mode_set_base = nv50_crtc_mode_set_base,
+	.mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
+	.load_lut = nv50_crtc_lut_load,
+};
+
+static const struct drm_crtc_funcs nv50_crtc_func = {
+	.cursor_set = nv50_crtc_cursor_set,
+	.cursor_move = nv50_crtc_cursor_move,
+	.gamma_set = nv50_crtc_gamma_set,
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = nv50_crtc_destroy,
+	.page_flip = nouveau_crtc_page_flip,
+};
+
+static void
+nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
+{
+}
+
+static void
+nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
+{
+}
+
+static int
+nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
+{
+	struct nv50_disp *disp = nv50_disp(dev);
+	struct nv50_head *head;
+	struct drm_crtc *crtc;
+	int ret, i;
+
+	head = kzalloc(sizeof(*head), GFP_KERNEL);
+	if (!head)
+		return -ENOMEM;
+
+	head->base.index = index;
+	head->base.set_dither = nv50_crtc_set_dither;
+	head->base.set_scale = nv50_crtc_set_scale;
+	head->base.set_color_vibrance = nv50_crtc_set_color_vibrance;
+	head->base.color_vibrance = 50;
+	head->base.vibrant_hue = 0;
+	head->base.cursor.set_offset = nv50_cursor_set_offset;
+	head->base.cursor.set_pos = nv50_cursor_set_pos;
+	for (i = 0; i < 256; i++) {
+		head->base.lut.r[i] = i << 8;
+		head->base.lut.g[i] = i << 8;
+		head->base.lut.b[i] = i << 8;
+	}
+
+	crtc = &head->base.base;
+	drm_crtc_init(dev, crtc, &nv50_crtc_func);
+	drm_crtc_helper_add(crtc, &nv50_crtc_hfunc);
+	drm_mode_crtc_set_gamma_size(crtc, 256);
+
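+	/* allocate, pin and map a small VRAM buffer to back this head's
+	 * 256-entry gamma LUT */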
+	ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
+			     0, 0x0000, NULL, &head->base.lut.nvbo);
+	if (!ret) {
+		ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM);
+		if (!ret) {
+			ret = nouveau_bo_map(head->base.lut.nvbo);
+			if (ret)
+				nouveau_bo_unpin(head->base.lut.nvbo);
+		}
+		if (ret)
+			nouveau_bo_ref(NULL, &head->base.lut.nvbo);
+	}
+
+	if (ret)
+		goto out;
+
+	nv50_crtc_lut_load(crtc);
+
+	/* allocate cursor resources */
+	ret = nv50_pioc_create(disp->core, NV50_DISP_CURS_CLASS, index,
+			      &(struct nv50_display_curs_class) {
+					.head = index,
+			      }, sizeof(struct nv50_display_curs_class),
+			      &head->curs.base);
+	if (ret)
+		goto out;
+
+	ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
+			     0, 0x0000, NULL, &head->base.cursor.nvbo);
+	if (!ret) {
+		ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM);
+		if (!ret) {
+			ret = nouveau_bo_map(head->base.cursor.nvbo);
+			if (ret)
+				nouveau_bo_unpin(head->base.cursor.nvbo);
+		}
+		if (ret)
+			nouveau_bo_ref(NULL, &head->base.cursor.nvbo);
+	}
+
+	if (ret)
+		goto out;
+
+	/* allocate page flip / sync resources */
+	ret = nv50_dmac_create(disp->core, NV50_DISP_SYNC_CLASS, index,
+			      &(struct nv50_display_sync_class) {
+					.pushbuf = EVO_PUSH_HANDLE(SYNC, index),
+					.head = index,
+			      }, sizeof(struct nv50_display_sync_class),
+			      disp->sync->bo.offset, &head->sync.base);
+	if (ret)
+		goto out;
+
+	head->sync.sem.offset = EVO_SYNC(1 + index, 0x00);
+
+	/* allocate overlay resources */
+	ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index,
+			      &(struct nv50_display_oimm_class) {
+					.head = index,
+			      }, sizeof(struct nv50_display_oimm_class),
+			      &head->oimm.base);
+	if (ret)
+		goto out;
+
+	ret = nv50_dmac_create(disp->core, NV50_DISP_OVLY_CLASS, index,
+			      &(struct nv50_display_ovly_class) {
+					.pushbuf = EVO_PUSH_HANDLE(OVLY, index),
+					.head = index,
+			      }, sizeof(struct nv50_display_ovly_class),
+			      disp->sync->bo.offset, &head->ovly.base);
+	if (ret)
+		goto out;
+
+out:
+	if (ret)
+		nv50_crtc_destroy(crtc);
+	return ret;
+}
+
+/******************************************************************************
+ * DAC
+ *****************************************************************************/
+static void
+nv50_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nv50_disp *disp = nv50_disp(encoder->dev);
+	int or = nv_encoder->or;
+	u32 dpms_ctrl;
+
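+	/* translate the DPMS mode into the core method's sync-disable bits:
+	 * standby drops hsync (0x1), suspend drops vsync (0x4), off both */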
+	dpms_ctrl = 0x00000000;
+	if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
+		dpms_ctrl |= 0x00000001;
+	if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
+		dpms_ctrl |= 0x00000004;
+
+	nv_call(disp->core, NV50_DISP_DAC_PWR + or, dpms_ctrl);
+}
+
+static bool
+nv50_dac_mode_fixup(struct drm_encoder *encoder,
+		    const struct drm_display_mode *mode,
+		    struct drm_display_mode *adjusted_mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_connector *nv_connector;
+
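+	/* when the connector is being scaled, substitute the panel's native
+	 * mode for the requested one, preserving the mode object's id */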
+	nv_connector = nouveau_encoder_connector_get(nv_encoder);
+	if (nv_connector && nv_connector->native_mode) {
+		if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+			int id = adjusted_mode->base.id;
+			*adjusted_mode = *nv_connector->native_mode;
+			adjusted_mode->base.id = id;
+		}
+	}
+
+	return true;
+}
+
+static void
+nv50_dac_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+		  struct drm_display_mode *adjusted_mode)
+{
+	struct nv50_mast *mast = nv50_mast(encoder->dev);
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+	u32 *push;
+
+	nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	push = evo_wait(mast, 8);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			u32 syncs = 0x00000000;
+
+			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+				syncs |= 0x00000001;
+			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+				syncs |= 0x00000002;
+
+			evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
+			evo_data(push, 1 << nv_crtc->index);
+			evo_data(push, syncs);
+		} else {
+			u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
+			u32 syncs = 0x00000001;
+
+			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+				syncs |= 0x00000008;
+			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+				syncs |= 0x00000010;
+
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				magic |= 0x00000001;
+
+			evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
+			evo_data(push, syncs);
+			evo_data(push, magic);
+			evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
+			evo_data(push, 1 << nv_crtc->index);
+		}
+
+		evo_kick(push, mast);
+	}
+
+	nv_encoder->crtc = encoder->crtc;
+}
+
+static void
+nv50_dac_disconnect(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nv50_mast *mast = nv50_mast(encoder->dev);
+	const int or = nv_encoder->or;
+	u32 *push;
+
+	if (nv_encoder->crtc) {
+		nv50_crtc_prepare(nv_encoder->crtc);
+
+		push = evo_wait(mast, 4);
+		if (push) {
+			if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+				evo_mthd(push, 0x0400 + (or * 0x080), 1);
+				evo_data(push, 0x00000000);
+			} else {
+				evo_mthd(push, 0x0180 + (or * 0x020), 1);
+				evo_data(push, 0x00000000);
+			}
+
+			evo_mthd(push, 0x0080, 1);
+			evo_data(push, 0x00000000);
+			evo_kick(push, mast);
+		}
+	}
+
+	nv_encoder->crtc = NULL;
+}
+
+static enum drm_connector_status
+nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct nv50_disp *disp = nv50_disp(encoder->dev);
+	int ret, or = nouveau_encoder(encoder)->or;
+	u32 load = 0;
+
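+	/* have the core run DAC load detection; anything short of a load on
+	 * all three RGB lines (0x7) is treated as disconnected */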
+	ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
+	if (ret || load != 7)
+		return connector_status_disconnected;
+
+	return connector_status_connected;
+}
+
+static void
+nv50_dac_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+	kfree(encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv50_dac_hfunc = {
+	.dpms = nv50_dac_dpms,
+	.mode_fixup = nv50_dac_mode_fixup,
+	.prepare = nv50_dac_disconnect,
+	.commit = nv50_dac_commit,
+	.mode_set = nv50_dac_mode_set,
+	.disable = nv50_dac_disconnect,
+	.get_crtc = nv50_display_crtc_get,
+	.detect = nv50_dac_detect
+};
+
+static const struct drm_encoder_funcs nv50_dac_func = {
+	.destroy = nv50_dac_destroy,
+};
+
+static int
+nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+	struct drm_device *dev = connector->dev;
+	struct nouveau_encoder *nv_encoder;
 	struct drm_encoder *encoder;
-	u32 tmp;
 
-	if (dcb->type != DCB_OUTPUT_TMDS)
+	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+	if (!nv_encoder)
+		return -ENOMEM;
+	nv_encoder->dcb = dcbe;
+	nv_encoder->or = ffs(dcbe->or) - 1;
+
+	encoder = to_drm_encoder(nv_encoder);
+	encoder->possible_crtcs = dcbe->heads;
+	encoder->possible_clones = 0;
+	drm_encoder_init(dev, encoder, &nv50_dac_func, DRM_MODE_ENCODER_DAC);
+	drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
+
+	drm_mode_connector_attach_encoder(connector, encoder);
+	return 0;
+}
+
+/******************************************************************************
+ * Audio
+ *****************************************************************************/
+static void
+nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_connector *nv_connector;
+	struct nv50_disp *disp = nv50_disp(encoder->dev);
+
+	nv_connector = nouveau_encoder_connector_get(nv_encoder);
+	if (!drm_detect_monitor_audio(nv_connector->edid))
 		return;
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
 
-		if (nv_encoder->dcb->type == DCB_OUTPUT_DP &&
-		    nv_encoder->dcb->or & (1 << or)) {
-			tmp  = nv_rd32(device, NV50_SOR_DP_CTRL(or, link));
-			tmp &= ~NV50_SOR_DP_CTRL_ENABLED;
-			nv_wr32(device, NV50_SOR_DP_CTRL(or, link), tmp);
+	nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or,
+			    nv_connector->base.eld,
+			    nv_connector->base.eld[2] * 4);
+}
+
+static void
+nv50_audio_disconnect(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nv50_disp *disp = nv50_disp(encoder->dev);
+
+	nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, NULL, 0);
+}
+
+/******************************************************************************
+ * HDMI
+ *****************************************************************************/
+static void
+nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+	struct nouveau_connector *nv_connector;
+	struct nv50_disp *disp = nv50_disp(encoder->dev);
+	const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
+	u32 rekey = 56; /* value used by both the binary driver and tegra */
+	u32 max_ac_packet;
+
+	nv_connector = nouveau_encoder_connector_get(nv_encoder);
+	if (!drm_detect_hdmi_monitor(nv_connector->edid))
+		return;
+
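+	/* the audio/aux packet budget is whatever remains of the horizontal
+	 * blanking period after the rekey overhead and a fixed cost of 18,
+	 * expressed in units of 32 */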
+	max_ac_packet  = mode->htotal - mode->hdisplay;
+	max_ac_packet -= rekey;
+	max_ac_packet -= 18; /* constant from tegra */
+	max_ac_packet /= 32;
+
+	nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff,
+			    NV84_DISP_SOR_HDMI_PWR_STATE_ON |
+			    (max_ac_packet << 16) | rekey);
+
+	nv50_audio_mode_set(encoder, mode);
+}
+
+static void
+nv50_hdmi_disconnect(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+	struct nv50_disp *disp = nv50_disp(encoder->dev);
+	const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
+
+	nv50_audio_disconnect(encoder);
+
+	nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, 0x00000000);
+}
+
+/******************************************************************************
+ * SOR
+ *****************************************************************************/
+static void
+nv50_sor_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct nv50_disp *disp = nv50_disp(dev);
+	struct drm_encoder *partner;
+	int or = nv_encoder->or;
+
+	nv_encoder->last_dpms = mode;
+
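+	/* a SOR's power state is shared by both of its links; leave it
+	 * alone if a partner encoder on the same OR is still active */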
+	list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
+		struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
+
+		if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
+			continue;
+
+		if (nv_partner != nv_encoder &&
+		    nv_partner->dcb->or == nv_encoder->dcb->or) {
+			if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
+				return;
 			break;
 		}
 	}
+
+	nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON));
+
+	if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
+		nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, disp->core);
 }
 
-static void
-nv50_display_unk40_handler(struct drm_device *dev)
+static bool
+nv50_sor_mode_fixup(struct drm_encoder *encoder,
+		    const struct drm_display_mode *mode,
+		    struct drm_display_mode *adjusted_mode)
 {
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nv50_display *disp = nv50_display(dev);
-	struct dcb_output *dcb = disp->irq.dcb;
-	u16 script = disp->irq.script;
-	u32 unk30 = nv_rd32(device, 0x610030), pclk = disp->irq.pclk;
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_connector *nv_connector;
 
-	NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
-	disp->irq.dcb = NULL;
-	if (!dcb)
-		goto ack;
-
-	nouveau_bios_run_display_table(dev, script, -pclk, dcb, -1);
-	nv50_display_unk40_dp_set_tmds(dev, dcb);
-
-ack:
-	nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
-	nv_wr32(device, 0x610030, 0x80000000);
-	nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) | 8);
-}
-
-static void
-nv50_display_bh(unsigned long data)
-{
-	struct drm_device *dev = (struct drm_device *)data;
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	for (;;) {
-		uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
-		uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
-
-		NV_DEBUG(drm, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
-
-		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
-			nv50_display_unk10_handler(dev);
-		else
-		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20)
-			nv50_display_unk20_handler(dev);
-		else
-		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40)
-			nv50_display_unk40_handler(dev);
-		else
-			break;
+	nv_connector = nouveau_encoder_connector_get(nv_encoder);
+	if (nv_connector && nv_connector->native_mode) {
+		if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+			int id = adjusted_mode->base.id;
+			*adjusted_mode = *nv_connector->native_mode;
+			adjusted_mode->base.id = id;
+		}
 	}
 
-	nv_wr32(device, NV03_PMC_INTR_EN_0, 1);
+	return true;
 }
 
 static void
-nv50_display_error_handler(struct drm_device *dev)
+nv50_sor_disconnect(struct drm_encoder *encoder)
 {
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	u32 channels = (nv_rd32(device, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
-	u32 addr, data;
-	int chid;
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nv50_mast *mast = nv50_mast(encoder->dev);
+	const int or = nv_encoder->or;
+	u32 *push;
 
-	for (chid = 0; chid < 5; chid++) {
-		if (!(channels & (1 << chid)))
-			continue;
+	if (nv_encoder->crtc) {
+		nv50_crtc_prepare(nv_encoder->crtc);
 
-		nv_wr32(device, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
-		addr = nv_rd32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid));
-		data = nv_rd32(device, NV50_PDISPLAY_TRAPPED_DATA(chid));
-		NV_ERROR(drm, "EvoCh %d Mthd 0x%04x Data 0x%08x "
-			      "(0x%04x 0x%02x)\n", chid,
-			 addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
+		push = evo_wait(mast, 4);
+		if (push) {
+			if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+				evo_mthd(push, 0x0600 + (or * 0x40), 1);
+				evo_data(push, 0x00000000);
+			} else {
+				evo_mthd(push, 0x0200 + (or * 0x20), 1);
+				evo_data(push, 0x00000000);
+			}
 
-		nv_wr32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
+			evo_mthd(push, 0x0080, 1);
+			evo_data(push, 0x00000000);
+			evo_kick(push, mast);
+		}
+
+		nv50_hdmi_disconnect(encoder);
 	}
+
+	nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+	nv_encoder->crtc = NULL;
+}
+
+static void
+nv50_sor_prepare(struct drm_encoder *encoder)
+{
+	nv50_sor_disconnect(encoder);
+	if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
+		evo_sync(encoder->dev);
+}
+
+static void
+nv50_sor_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+		  struct drm_display_mode *mode)
+{
+	struct nv50_disp *disp = nv50_disp(encoder->dev);
+	struct nv50_mast *mast = nv50_mast(encoder->dev);
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+	struct nouveau_connector *nv_connector;
+	struct nvbios *bios = &drm->vbios;
+	u32 *push, lvds = 0;
+	u8 owner = 1 << nv_crtc->index;
+	u8 proto = 0xf;
+	u8 depth = 0x0;
+
+	nv_connector = nouveau_encoder_connector_get(nv_encoder);
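+	/* select the SOR protocol from the DCB output type: single-link
+	 * TMDS on link A (0x1) or B (0x2), dual-link TMDS (0x5) for clocks
+	 * at or above 165MHz, LVDS (0x0), or one of two DP modes (0x8/0x9) */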
+	switch (nv_encoder->dcb->type) {
+	case DCB_OUTPUT_TMDS:
+		if (nv_encoder->dcb->sorconf.link & 1) {
+			if (mode->clock < 165000)
+				proto = 0x1;
+			else
+				proto = 0x5;
+		} else {
+			proto = 0x2;
+		}
+
+		nv50_hdmi_mode_set(encoder, mode);
+		break;
+	case DCB_OUTPUT_LVDS:
+		proto = 0x0;
+
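+		/* build the LVDS script argument: 0x0100 requests dual-link,
+		 * 0x0200 a 24-bit panel; unless the VBIOS forces these, probe
+		 * the SPWG EDID byte or the dual-link transition clock */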
+		if (bios->fp_no_ddc) {
+			if (bios->fp.dual_link)
+				lvds |= 0x0100;
+			if (bios->fp.if_is_24bit)
+				lvds |= 0x0200;
+		} else {
+			if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
+				if (((u8 *)nv_connector->edid)[121] == 2)
+					lvds |= 0x0100;
+			} else
+			if (mode->clock >= bios->fp.duallink_transition_clk) {
+				lvds |= 0x0100;
+			}
+
+			if (lvds & 0x0100) {
+				if (bios->fp.strapless_is_24bit & 2)
+					lvds |= 0x0200;
+			} else {
+				if (bios->fp.strapless_is_24bit & 1)
+					lvds |= 0x0200;
+			}
+
+			if (nv_connector->base.display_info.bpc == 8)
+				lvds |= 0x0200;
+		}
+
+		nv_call(disp->core, NV50_DISP_SOR_LVDS_SCRIPT + nv_encoder->or, lvds);
+		break;
+	case DCB_OUTPUT_DP:
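+		/* derive the required link data rate (mode clock * bpp / 8)
+		 * and the wire format depth from the sink's reported bpc */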
+		if (nv_connector->base.display_info.bpc == 6) {
+			nv_encoder->dp.datarate = mode->clock * 18 / 8;
+			depth = 0x2;
+		} else
+		if (nv_connector->base.display_info.bpc == 8) {
+			nv_encoder->dp.datarate = mode->clock * 24 / 8;
+			depth = 0x5;
+		} else {
+			nv_encoder->dp.datarate = mode->clock * 30 / 8;
+			depth = 0x6;
+		}
+
+		if (nv_encoder->dcb->sorconf.link & 1)
+			proto = 0x8;
+		else
+			proto = 0x9;
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	push = evo_wait(mast, 8);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1);
+			evo_data(push, (depth << 16) | (proto << 8) | owner);
+		} else {
+			u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
+			u32 syncs = 0x00000001;
+
+			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+				syncs |= 0x00000008;
+			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+				syncs |= 0x00000010;
+
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				magic |= 0x00000001;
+
+			evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
+			evo_data(push, syncs | (depth << 6));
+			evo_data(push, magic);
+			evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 1);
+			evo_data(push, owner | (proto << 8));
+		}
+
+		evo_kick(push, mast);
+	}
+
+	nv_encoder->crtc = encoder->crtc;
+}
+
+static void
+nv50_sor_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+	kfree(encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
+	.dpms = nv50_sor_dpms,
+	.mode_fixup = nv50_sor_mode_fixup,
+	.prepare = nv50_sor_prepare,
+	.commit = nv50_sor_commit,
+	.mode_set = nv50_sor_mode_set,
+	.disable = nv50_sor_disconnect,
+	.get_crtc = nv50_display_crtc_get,
+};
+
+static const struct drm_encoder_funcs nv50_sor_func = {
+	.destroy = nv50_sor_destroy,
+};
+
+static int
+nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+	struct drm_device *dev = connector->dev;
+	struct nouveau_encoder *nv_encoder;
+	struct drm_encoder *encoder;
+
+	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+	if (!nv_encoder)
+		return -ENOMEM;
+	nv_encoder->dcb = dcbe;
+	nv_encoder->or = ffs(dcbe->or) - 1;
+	nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+
+	encoder = to_drm_encoder(nv_encoder);
+	encoder->possible_crtcs = dcbe->heads;
+	encoder->possible_clones = 0;
+	drm_encoder_init(dev, encoder, &nv50_sor_func, DRM_MODE_ENCODER_TMDS);
+	drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
+
+	drm_mode_connector_attach_encoder(connector, encoder);
+	return 0;
+}
+
+/******************************************************************************
+ * Init
+ *****************************************************************************/
+void
+nv50_display_fini(struct drm_device *dev)
+{
+}
+
+int
+nv50_display_init(struct drm_device *dev)
+{
+	u32 *push = evo_wait(nv50_mast(dev), 32);
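+
+	/* point the core channel's notifier ctxdma (method 0x0088) at the
+	 * shared sync buffer, and wait for the push to be processed */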
+	if (push) {
+		evo_mthd(push, 0x0088, 1);
+		evo_data(push, NvEvoSync);
+		evo_kick(push, nv50_mast(dev));
+		return evo_sync(dev);
+	}
+
+	return -EBUSY;
 }
 
 void
-nv50_display_intr(struct drm_device *dev)
+nv50_display_destroy(struct drm_device *dev)
 {
+	struct nv50_disp *disp = nv50_disp(dev);
+
+	nv50_dmac_destroy(disp->core, &disp->mast.base);
+
+	nouveau_bo_unmap(disp->sync);
+	if (disp->sync)
+		nouveau_bo_unpin(disp->sync);
+	nouveau_bo_ref(NULL, &disp->sync);
+
+	nouveau_display(dev)->priv = NULL;
+	kfree(disp);
+}
+
+int
+nv50_display_create(struct drm_device *dev)
+{
+	static const u16 oclass[] = {
+		NVE0_DISP_CLASS,
+		NVD0_DISP_CLASS,
+		NVA3_DISP_CLASS,
+		NV94_DISP_CLASS,
+		NVA0_DISP_CLASS,
+		NV84_DISP_CLASS,
+		NV50_DISP_CLASS,
+	};
 	struct nouveau_device *device = nouveau_dev(dev);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nv50_display *disp = nv50_display(dev);
-	uint32_t delayed = 0;
+	struct dcb_table *dcb = &drm->vbios.dcb;
+	struct drm_connector *connector, *tmp;
+	struct nv50_disp *disp;
+	struct dcb_output *dcbe;
+	int crtcs, ret, i;
 
-	while (nv_rd32(device, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
-		uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
-		uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
-		uint32_t clock;
+	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
+	if (!disp)
+		return -ENOMEM;
 
-		NV_DEBUG(drm, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
+	nouveau_display(dev)->priv = disp;
+	nouveau_display(dev)->dtor = nv50_display_destroy;
+	nouveau_display(dev)->init = nv50_display_init;
+	nouveau_display(dev)->fini = nv50_display_fini;
 
-		if (!intr0 && !(intr1 & ~delayed))
+	/* small shared memory area we use for notifiers and semaphores */
+	ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+			     0, 0x0000, NULL, &disp->sync);
+	if (!ret) {
+		ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
+		if (!ret) {
+			ret = nouveau_bo_map(disp->sync);
+			if (ret)
+				nouveau_bo_unpin(disp->sync);
+		}
+		if (ret)
+			nouveau_bo_ref(NULL, &disp->sync);
+	}
+
+	if (ret)
+		goto out;
+
+	/* attempt to allocate a supported evo display class */
+	ret = -ENODEV;
+	for (i = 0; ret && i < ARRAY_SIZE(oclass); i++) {
+		ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
+					 0xd1500000, oclass[i], NULL, 0,
+					 &disp->core);
+	}
+
+	if (ret)
+		goto out;
+
+	/* allocate master evo channel */
+	ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0,
+			      &(struct nv50_display_mast_class) {
+					.pushbuf = EVO_PUSH_HANDLE(MAST, 0),
+			      }, sizeof(struct nv50_display_mast_class),
+			      disp->sync->bo.offset, &disp->mast.base);
+	if (ret)
+		goto out;
+
+	/* create crtc objects to represent the hw heads */
+	if (nv_mclass(disp->core) >= NVD0_DISP_CLASS)
+		crtcs = nv_rd32(device, 0x022448);
+	else
+		crtcs = 2;
+
+	for (i = 0; i < crtcs; i++) {
+		ret = nv50_crtc_create(dev, disp->core, i);
+		if (ret)
+			goto out;
+	}
+
+	/* create encoder/connector objects based on VBIOS DCB table */
+	for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
+		connector = nouveau_connector_create(dev, dcbe->connector);
+		if (IS_ERR(connector))
+			continue;
+
+		if (dcbe->location != DCB_LOC_ON_CHIP) {
+			NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
+				dcbe->type, ffs(dcbe->or) - 1);
+			continue;
+		}
+
+		switch (dcbe->type) {
+		case DCB_OUTPUT_TMDS:
+		case DCB_OUTPUT_LVDS:
+		case DCB_OUTPUT_DP:
+			nv50_sor_create(connector, dcbe);
 			break;
-
-		if (intr0 & 0x001f0000) {
-			nv50_display_error_handler(dev);
-			intr0 &= ~0x001f0000;
-		}
-
-		if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
-			intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
-			delayed |= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
-		}
-
-		clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
-				  NV50_PDISPLAY_INTR_1_CLK_UNK20 |
-				  NV50_PDISPLAY_INTR_1_CLK_UNK40));
-		if (clock) {
-			nv_wr32(device, NV03_PMC_INTR_EN_0, 0);
-			tasklet_schedule(&disp->tasklet);
-			delayed |= clock;
-			intr1 &= ~clock;
-		}
-
-		if (intr0) {
-			NV_ERROR(drm, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
-			nv_wr32(device, NV50_PDISPLAY_INTR_0, intr0);
-		}
-
-		if (intr1) {
-			NV_ERROR(drm,
-				 "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
-			nv_wr32(device, NV50_PDISPLAY_INTR_1, intr1);
+		case DCB_OUTPUT_ANALOG:
+			nv50_dac_create(connector, dcbe);
+			break;
+		default:
+			NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
+				dcbe->type, ffs(dcbe->or) - 1);
+			continue;
 		}
 	}
+
+	/* cull any connectors we created that don't have an encoder */
+	list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
+		if (connector->encoder_ids[0])
+			continue;
+
+		NV_WARN(drm, "%s has no encoders, removing\n",
+			drm_get_connector_name(connector));
+		connector->funcs->destroy(connector);
+	}
+
+out:
+	if (ret)
+		nv50_display_destroy(dev);
+	return ret;
 }
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 973554d8a7a6..70da347aa8c5 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -30,77 +30,16 @@
 #include "nouveau_display.h"
 #include "nouveau_crtc.h"
 #include "nouveau_reg.h"
-#include "nv50_evo.h"
 
-struct nv50_display_crtc {
-	struct nouveau_channel *sync;
-	struct {
-		struct nouveau_bo *bo;
-		u32 offset;
-		u16 value;
-	} sem;
-};
+int  nv50_display_create(struct drm_device *);
+void nv50_display_destroy(struct drm_device *);
+int  nv50_display_init(struct drm_device *);
+void nv50_display_fini(struct drm_device *);
 
-struct nv50_display {
-	struct nouveau_channel *master;
-
-	struct nouveau_gpuobj *ramin;
-	u32 dmao;
-	u32 hash;
-
-	struct nv50_display_crtc crtc[2];
-
-	struct tasklet_struct tasklet;
-	struct {
-		struct dcb_output *dcb;
-		u16 script;
-		u32 pclk;
-	} irq;
-};
-
-static inline struct nv50_display *
-nv50_display(struct drm_device *dev)
-{
-	return nouveau_display(dev)->priv;
-}
-
-int nv50_display_early_init(struct drm_device *dev);
-void nv50_display_late_takedown(struct drm_device *dev);
-int nv50_display_create(struct drm_device *dev);
-int nv50_display_init(struct drm_device *dev);
-void nv50_display_fini(struct drm_device *dev);
-void nv50_display_destroy(struct drm_device *dev);
-void nv50_display_intr(struct drm_device *);
-int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
-int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
-
-u32  nv50_display_active_crtcs(struct drm_device *);
-
-int  nv50_display_sync(struct drm_device *);
-int  nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
-			    struct nouveau_channel *chan);
 void nv50_display_flip_stop(struct drm_crtc *);
-
-int  nv50_evo_create(struct drm_device *dev);
-void nv50_evo_destroy(struct drm_device *dev);
-int  nv50_evo_init(struct drm_device *dev);
-void nv50_evo_fini(struct drm_device *dev);
-void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
-			  u64 size);
-int  nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype,
-			 u64 base, u64 size, struct nouveau_gpuobj **);
-
-int  nvd0_display_create(struct drm_device *);
-void nvd0_display_destroy(struct drm_device *);
-int  nvd0_display_init(struct drm_device *);
-void nvd0_display_fini(struct drm_device *);
-void nvd0_display_intr(struct drm_device *);
-
-void nvd0_display_flip_stop(struct drm_crtc *);
-int  nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+int  nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
 			    struct nouveau_channel *, u32 swap_interval);
 
 struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head);
-struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int head);
 
 #endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
deleted file mode 100644
index 9f6f55cdfa77..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nv50_display.h"
-
-#include <core/gpuobj.h>
-
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-static u32
-nv50_evo_rd32(struct nouveau_object *object, u32 addr)
-{
-	void __iomem *iomem = object->oclass->ofuncs->rd08;
-	return ioread32_native(iomem + addr);
-}
-
-static void
-nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data)
-{
-	void __iomem *iomem = object->oclass->ofuncs->rd08;
-	iowrite32_native(data, iomem + addr);
-}
-
-static void
-nv50_evo_channel_del(struct nouveau_channel **pevo)
-{
-	struct nouveau_channel *evo = *pevo;
-
-	if (!evo)
-		return;
-	*pevo = NULL;
-
-	nouveau_bo_unmap(evo->push.buffer);
-	nouveau_bo_ref(NULL, &evo->push.buffer);
-
-	if (evo->object)
-		iounmap(evo->object->oclass->ofuncs);
-
-	kfree(evo);
-}
-
-int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
-		    u64 base, u64 size, struct nouveau_gpuobj **pobj)
-{
-	struct drm_device *dev = evo->fence;
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nv50_display *disp = nv50_display(dev);
-	u32 dmao = disp->dmao;
-	u32 hash = disp->hash;
-	u32 flags5;
-
-	if (nv_device(drm->device)->chipset < 0xc0) {
-		/* not supported on 0x50, specified in format mthd */
-		if (nv_device(drm->device)->chipset == 0x50)
-			memtype = 0;
-		flags5 = 0x00010000;
-	} else {
-		if (memtype & 0x80000000)
-			flags5 = 0x00000000; /* large pages */
-		else
-			flags5 = 0x00020000;
-	}
-
-	nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22));
-	nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1));
-	nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base));
-	nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 |
-					  upper_32_bits(base));
-	nv_wo32(disp->ramin, dmao + 0x10, 0x00000000);
-	nv_wo32(disp->ramin, dmao + 0x14, flags5);
-
-	nv_wo32(disp->ramin, hash + 0x00, handle);
-	nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) |
-					   evo->handle);
-
-	disp->dmao += 0x20;
-	disp->hash += 0x08;
-	return 0;
-}
-
-static int
-nv50_evo_channel_new(struct drm_device *dev, int chid,
-		     struct nouveau_channel **pevo)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nv50_display *disp = nv50_display(dev);
-	struct nouveau_channel *evo;
-	int ret;
-
-	evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
-	if (!evo)
-		return -ENOMEM;
-	*pevo = evo;
-
-	evo->drm = drm;
-	evo->handle = chid;
-	evo->fence = dev;
-	evo->user_get = 4;
-	evo->user_put = 0;
-
-	ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
-			     &evo->push.buffer);
-	if (ret == 0)
-		ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM);
-	if (ret) {
-		NV_ERROR(drm, "Error creating EVO DMA push buffer: %d\n", ret);
-		nv50_evo_channel_del(pevo);
-		return ret;
-	}
-
-	ret = nouveau_bo_map(evo->push.buffer);
-	if (ret) {
-		NV_ERROR(drm, "Error mapping EVO DMA push buffer: %d\n", ret);
-		nv50_evo_channel_del(pevo);
-		return ret;
-	}
-
-	evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL);
-#ifdef NOUVEAU_OBJECT_MAGIC
-	evo->object->_magic = NOUVEAU_OBJECT_MAGIC;
-#endif
-	evo->object->parent = nv_object(disp->ramin)->parent;
-	evo->object->engine = nv_object(disp->ramin)->engine;
-	evo->object->oclass =
-		kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
-	evo->object->oclass->ofuncs =
-		kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
-	evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
-	evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
-	evo->object->oclass->ofuncs->rd08 =
-		ioremap(pci_resource_start(dev->pdev, 0) +
-			NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
-	return 0;
-}
-
-static int
-nv50_evo_channel_init(struct nouveau_channel *evo)
-{
-	struct nouveau_drm *drm = evo->drm;
-	struct nouveau_device *device = nv_device(drm->device);
-	int id = evo->handle, ret, i;
-	u64 pushbuf = evo->push.buffer->bo.offset;
-	u32 tmp;
-
-	tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
-	if ((tmp & 0x009f0000) == 0x00020000)
-		nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
-
-	tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
-	if ((tmp & 0x003f0000) == 0x00030000)
-		nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
-
-	/* initialise fifo */
-	nv_wr32(device, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
-		     NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
-		     NV50_PDISPLAY_EVO_DMA_CB_VALID);
-	nv_wr32(device, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
-	nv_wr32(device, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
-	nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
-		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
-
-	nv_wr32(device, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
-	nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
-		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
-	if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
-		NV_ERROR(drm, "EvoCh %d init timeout: 0x%08x\n", id,
-			 nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
-		return -EBUSY;
-	}
-
-	/* enable error reporting on the channel */
-	nv_mask(device, 0x610028, 0x00000000, 0x00010001 << id);
-
-	evo->dma.max = (4096/4) - 2;
-	evo->dma.max &= ~7;
-	evo->dma.put = 0;
-	evo->dma.cur = evo->dma.put;
-	evo->dma.free = evo->dma.max - evo->dma.cur;
-
-	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
-		OUT_RING(evo, 0);
-
-	return 0;
-}
-
-static void
-nv50_evo_channel_fini(struct nouveau_channel *evo)
-{
-	struct nouveau_drm *drm = evo->drm;
-	struct nouveau_device *device = nv_device(drm->device);
-	int id = evo->handle;
-
-	nv_mask(device, 0x610028, 0x00010001 << id, 0x00000000);
-	nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
-	nv_wr32(device, NV50_PDISPLAY_INTR_0, (1 << id));
-	nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
-	if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
-		NV_ERROR(drm, "EvoCh %d takedown timeout: 0x%08x\n", id,
-			 nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
-	}
-}
-
-void
-nv50_evo_destroy(struct drm_device *dev)
-{
-	struct nv50_display *disp = nv50_display(dev);
-	int i;
-
-	for (i = 0; i < 2; i++) {
-		if (disp->crtc[i].sem.bo) {
-			nouveau_bo_unmap(disp->crtc[i].sem.bo);
-			nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
-		}
-		nv50_evo_channel_del(&disp->crtc[i].sync);
-	}
-	nv50_evo_channel_del(&disp->master);
-	nouveau_gpuobj_ref(NULL, &disp->ramin);
-}
-
-int
-nv50_evo_create(struct drm_device *dev)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_fb *pfb = nouveau_fb(drm->device);
-	struct nv50_display *disp = nv50_display(dev);
-	struct nouveau_channel *evo;
-	int ret, i, j;
-
-	/* set up object management on it; every other evo channel will
-	 * use this too, as there's no per-channel support in the
-	 * hardware
-	 */
-	ret = nouveau_gpuobj_new(drm->device, NULL, 32768, 65536,
-				 NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin);
-	if (ret) {
-		NV_ERROR(drm, "Error allocating EVO channel memory: %d\n", ret);
-		goto err;
-	}
-
-	disp->hash = 0x0000;
-	disp->dmao = 0x1000;
-
-	/* create primary evo channel, the one we use for modesetting
-	 * purposes
-	 */
-	ret = nv50_evo_channel_new(dev, 0, &disp->master);
-	if (ret)
-		return ret;
-	evo = disp->master;
-
-	ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
-				  disp->ramin->addr + 0x2000, 0x1000, NULL);
-	if (ret)
-		goto err;
-
-	/* create some default objects for the scanout memtypes we support */
-	ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
-				  0, pfb->ram.size, NULL);
-	if (ret)
-		goto err;
-
-	ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
-				  0, pfb->ram.size, NULL);
-	if (ret)
-		goto err;
-
-	ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
-				  (nv_device(drm->device)->chipset < 0xc0 ? 0x7a : 0xfe),
-				  0, pfb->ram.size, NULL);
-	if (ret)
-		goto err;
-
-	ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
-				  (nv_device(drm->device)->chipset < 0xc0 ? 0x70 : 0xfe),
-				  0, pfb->ram.size, NULL);
-	if (ret)
-		goto err;
-
-	/* create "display sync" channels and other structures we need
-	 * to implement page flipping
-	 */
-	for (i = 0; i < 2; i++) {
-		struct nv50_display_crtc *dispc = &disp->crtc[i];
-		u64 offset;
-
-		ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
-		if (ret)
-			goto err;
-
-		ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
-				     0, 0x0000, NULL, &dispc->sem.bo);
-		if (!ret) {
-			ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
-			if (!ret)
-				ret = nouveau_bo_map(dispc->sem.bo);
-			if (ret)
-				nouveau_bo_ref(NULL, &dispc->sem.bo);
-			offset = dispc->sem.bo->bo.offset;
-		}
-
-		if (ret)
-			goto err;
-
-		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
-					  offset, 4096, NULL);
-		if (ret)
-			goto err;
-
-		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
-					  0, pfb->ram.size, NULL);
-		if (ret)
-			goto err;
-
-		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
-					  (nv_device(drm->device)->chipset < 0xc0 ?
-					  0x7a : 0xfe),
-					  0, pfb->ram.size, NULL);
-		if (ret)
-			goto err;
-
-		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
-					  (nv_device(drm->device)->chipset < 0xc0 ?
-					  0x70 : 0xfe),
-					  0, pfb->ram.size, NULL);
-		if (ret)
-			goto err;
-
-		for (j = 0; j < 4096; j += 4)
-			nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
-		dispc->sem.offset = 0;
-	}
-
-	return 0;
-
-err:
-	nv50_evo_destroy(dev);
-	return ret;
-}
-
-int
-nv50_evo_init(struct drm_device *dev)
-{
-	struct nv50_display *disp = nv50_display(dev);
-	int ret, i;
-
-	ret = nv50_evo_channel_init(disp->master);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < 2; i++) {
-		ret = nv50_evo_channel_init(disp->crtc[i].sync);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-void
-nv50_evo_fini(struct drm_device *dev)
-{
-	struct nv50_display *disp = nv50_display(dev);
-	int i;
-
-	for (i = 0; i < 2; i++) {
-		if (disp->crtc[i].sync)
-			nv50_evo_channel_fini(disp->crtc[i].sync);
-	}
-
-	if (disp->master)
-		nv50_evo_channel_fini(disp->master);
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
deleted file mode 100644
index 771d879bc834..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __NV50_EVO_H__
-#define __NV50_EVO_H__
-
-#define NV50_EVO_UPDATE                                              0x00000080
-#define NV50_EVO_UNK84                                               0x00000084
-#define NV50_EVO_UNK84_NOTIFY                                        0x40000000
-#define NV50_EVO_UNK84_NOTIFY_DISABLED                               0x00000000
-#define NV50_EVO_UNK84_NOTIFY_ENABLED                                0x40000000
-#define NV50_EVO_DMA_NOTIFY                                          0x00000088
-#define NV50_EVO_DMA_NOTIFY_HANDLE                                   0xffffffff
-#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE                              0x00000000
-#define NV50_EVO_UNK8C                                               0x0000008C
-
-#define NV50_EVO_DAC(n, r)                       ((n) * 0x80 + NV50_EVO_DAC_##r)
-#define NV50_EVO_DAC_MODE_CTRL                                       0x00000400
-#define NV50_EVO_DAC_MODE_CTRL_CRTC0                                 0x00000001
-#define NV50_EVO_DAC_MODE_CTRL_CRTC1                                 0x00000002
-#define NV50_EVO_DAC_MODE_CTRL2                                      0x00000404
-#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC                               0x00000001
-#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC                               0x00000002
-
-#define NV50_EVO_SOR(n, r)                       ((n) * 0x40 + NV50_EVO_SOR_##r)
-#define NV50_EVO_SOR_MODE_CTRL                                       0x00000600
-#define NV50_EVO_SOR_MODE_CTRL_CRTC0                                 0x00000001
-#define NV50_EVO_SOR_MODE_CTRL_CRTC1                                 0x00000002
-#define NV50_EVO_SOR_MODE_CTRL_TMDS                                  0x00000100
-#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK                        0x00000400
-#define NV50_EVO_SOR_MODE_CTRL_NHSYNC                                0x00001000
-#define NV50_EVO_SOR_MODE_CTRL_NVSYNC                                0x00002000
-
-#define NV50_EVO_CRTC(n, r)                    ((n) * 0x400 + NV50_EVO_CRTC_##r)
-#define NV84_EVO_CRTC(n, r)                    ((n) * 0x400 + NV84_EVO_CRTC_##r)
-#define NV50_EVO_CRTC_UNK0800                                        0x00000800
-#define NV50_EVO_CRTC_CLOCK                                          0x00000804
-#define NV50_EVO_CRTC_INTERLACE                                      0x00000808
-#define NV50_EVO_CRTC_DISPLAY_START                                  0x00000810
-#define NV50_EVO_CRTC_DISPLAY_TOTAL                                  0x00000814
-#define NV50_EVO_CRTC_SYNC_DURATION                                  0x00000818
-#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END                        0x0000081c
-#define NV50_EVO_CRTC_UNK0820                                        0x00000820
-#define NV50_EVO_CRTC_UNK0824                                        0x00000824
-#define NV50_EVO_CRTC_UNK082C                                        0x0000082c
-#define NV50_EVO_CRTC_CLUT_MODE                                      0x00000840
-/* You can't have a palette in 8 bit mode (=OFF) */
-#define NV50_EVO_CRTC_CLUT_MODE_BLANK                                0x00000000
-#define NV50_EVO_CRTC_CLUT_MODE_OFF                                  0x80000000
-#define NV50_EVO_CRTC_CLUT_MODE_ON                                   0xC0000000
-#define NV50_EVO_CRTC_CLUT_OFFSET                                    0x00000844
-#define NV84_EVO_CRTC_CLUT_DMA                                       0x0000085C
-#define NV84_EVO_CRTC_CLUT_DMA_HANDLE                                0xffffffff
-#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE                           0x00000000
-#define NV50_EVO_CRTC_FB_OFFSET                                      0x00000860
-#define NV50_EVO_CRTC_FB_SIZE                                        0x00000868
-#define NV50_EVO_CRTC_FB_CONFIG                                      0x0000086c
-#define NV50_EVO_CRTC_FB_CONFIG_MODE                                 0x00100000
-#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE                            0x00000000
-#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH                           0x00100000
-#define NV50_EVO_CRTC_FB_DEPTH                                       0x00000870
-#define NV50_EVO_CRTC_FB_DEPTH_8                                     0x00001e00
-#define NV50_EVO_CRTC_FB_DEPTH_15                                    0x0000e900
-#define NV50_EVO_CRTC_FB_DEPTH_16                                    0x0000e800
-#define NV50_EVO_CRTC_FB_DEPTH_24                                    0x0000cf00
-#define NV50_EVO_CRTC_FB_DEPTH_30                                    0x0000d100
-#define NV50_EVO_CRTC_FB_DMA                                         0x00000874
-#define NV50_EVO_CRTC_FB_DMA_HANDLE                                  0xffffffff
-#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE                             0x00000000
-#define NV50_EVO_CRTC_CURSOR_CTRL                                    0x00000880
-#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE                               0x05000000
-#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW                               0x85000000
-#define NV50_EVO_CRTC_CURSOR_OFFSET                                  0x00000884
-#define NV84_EVO_CRTC_CURSOR_DMA                                     0x0000089c
-#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE                              0xffffffff
-#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE                         0x00000000
-#define NV50_EVO_CRTC_DITHER_CTRL                                    0x000008a0
-#define NV50_EVO_CRTC_DITHER_CTRL_OFF                                0x00000000
-#define NV50_EVO_CRTC_DITHER_CTRL_ON                                 0x00000011
-#define NV50_EVO_CRTC_SCALE_CTRL                                     0x000008a4
-#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE                            0x00000000
-#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE                              0x00000009
-#define NV50_EVO_CRTC_COLOR_CTRL                                     0x000008a8
-#define NV50_EVO_CRTC_COLOR_CTRL_VIBRANCE                            0x000fff00
-#define NV50_EVO_CRTC_COLOR_CTRL_HUE                                 0xfff00000
-#define NV50_EVO_CRTC_FB_POS                                         0x000008c0
-#define NV50_EVO_CRTC_REAL_RES                                       0x000008c8
-#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET                            0x000008d4
-#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \
-	((((unsigned)y << 16) & 0xFFFF0000) | (((unsigned)x) & 0x0000FFFF))
-/* Both of these are needed, otherwise nothing happens. */
-#define NV50_EVO_CRTC_SCALE_RES1                                     0x000008d8
-#define NV50_EVO_CRTC_SCALE_RES2                                     0x000008dc
-#define NV50_EVO_CRTC_UNK900                                         0x00000900
-#define NV50_EVO_CRTC_UNK904                                         0x00000904
-
-#endif
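
The per-unit macros in the header above compose a method offset by pasting the register suffix onto the base name and adding a per-unit stride, e.g.

    NV50_EVO_SOR(1, MODE_CTRL)
      = 1 * 0x40 + NV50_EVO_SOR_MODE_CTRL
      = 0x040 + 0x600
      = 0x640

which is how the SOR code removed below addresses the mode-control method of SOR1.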
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index e0763ea88ee2..c20f2727ea0b 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -110,8 +110,11 @@ nv50_fence_create(struct nouveau_drm *drm)
 			     0, 0x0000, NULL, &priv->bo);
 	if (!ret) {
 		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
-		if (!ret)
+		if (!ret) {
 			ret = nouveau_bo_map(priv->bo);
+			if (ret)
+				nouveau_bo_unpin(priv->bo);
+		}
 		if (ret)
 			nouveau_bo_ref(NULL, &priv->bo);
 	}
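
The hunk above closes a leak: previously, a failing nouveau_bo_map() dropped the buffer reference while the buffer was still pinned. A minimal sketch of the repaired ordering, using only the nouveau_bo_* calls visible in the hunk (surrounding driver code omitted):

	ret = nouveau_bo_pin(bo, TTM_PL_FLAG_VRAM);
	if (!ret) {
		ret = nouveau_bo_map(bo);
		if (ret)
			nouveau_bo_unpin(bo);	/* undo the pin on map failure */
	}
	if (ret)
		nouveau_bo_ref(NULL, &bo);	/* only then drop the reference */

The same fix is applied to nvc0_fence_create() further down.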
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index c4a65039b1ca..8bd5d2781baf 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -546,7 +546,7 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_device *device = nouveau_dev(dev);
-	u32 crtc_mask = nv50_display_active_crtcs(dev);
+	u32 crtc_mask = 0; /*XXX: nv50_display_active_crtcs(dev); */
 	struct nouveau_mem_exec_func exec = {
 		.dev = dev,
 		.precharge = mclk_precharge,
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
deleted file mode 100644
index b562b59e1326..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ /dev/null
@@ -1,530 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_encoder.h"
-#include "nouveau_connector.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-#include <subdev/timer.h>
-
-static u32
-nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
-	static const u8 nv50[] = { 16, 8, 0, 24 };
-	if (nv_device(drm->device)->chipset == 0xaf)
-		return nvaf[lane];
-	return nv50[lane];
-}
-
-static void
-nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-	nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24);
-}
-
-static void
-nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
-		      u8 lane, u8 swing, u8 preem)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-	u32 shift = nv50_sor_dp_lane_map(dev, dcb, lane);
-	u32 mask = 0x000000ff << shift;
-	u8 *table, *entry, *config;
-
-	table = nouveau_dp_bios_data(dev, dcb, &entry);
-	if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
-		NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
-		return;
-	}
-
-	config = entry + table[4];
-	while (config[0] != swing || config[1] != preem) {
-		config += table[5];
-		if (config >= entry + table[4] + entry[4] * table[5])
-			return;
-	}
-
-	nv_mask(device, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift);
-	nv_mask(device, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift);
-	nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8);
-}
-
-static void
-nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
-		     int link_nr, u32 link_bw, bool enhframe)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-	u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000;
-	u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800)) & ~0x000c0000;
-	u8 *table, *entry, mask;
-	int i;
-
-	table = nouveau_dp_bios_data(dev, dcb, &entry);
-	if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
-		NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
-		return;
-	}
-
-	entry = ROMPTR(dev, entry[10]);
-	if (entry) {
-		while (link_bw < ROM16(entry[0]) * 10)
-			entry += 4;
-
-		nouveau_bios_run_init_table(dev, ROM16(entry[2]), dcb, crtc);
-	}
-
-	dpctrl |= ((1 << link_nr) - 1) << 16;
-	if (enhframe)
-		dpctrl |= 0x00004000;
-
-	if (link_bw > 162000)
-		clksor |= 0x00040000;
-
-	nv_wr32(device, 0x614300 + (or * 0x800), clksor);
-	nv_wr32(device, NV50_SOR_DP_CTRL(or, link), dpctrl);
-
-	mask = 0;
-	for (i = 0; i < link_nr; i++)
-		mask |= 1 << (nv50_sor_dp_lane_map(dev, dcb, i) >> 3);
-	nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask);
-}
-
-static void
-nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000;
-	u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800));
-	if (clksor & 0x000c0000)
-		*bw = 270000;
-	else
-		*bw = 162000;
-
-	if      (dpctrl > 0x00030000) *nr = 4;
-	else if (dpctrl > 0x00010000) *nr = 2;
-	else			      *nr = 1;
-}
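
These thresholds invert what nv50_sor_dp_link_set() writes above: the lane-enable field is ((1 << link_nr) - 1) << 16, so

    link_nr 1  ->  dpctrl & 0x000f0000 == 0x00010000
    link_nr 2  ->  dpctrl & 0x000f0000 == 0x00030000
    link_nr 4  ->  dpctrl & 0x000f0000 == 0x000f0000

and comparing the masked value against 0x00030000 and 0x00010000 recovers the lane count.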
-
-void
-nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	const u32 symbol = 100000;
-	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
-	int TU, VTUi, VTUf, VTUa;
-	u64 link_data_rate, link_ratio, unk;
-	u32 best_diff = 64 * symbol;
-	u32 link_nr, link_bw, r;
-
-	/* calculate packed data rate for each lane */
-	nv50_sor_dp_link_get(dev, or, link, &link_nr, &link_bw);
-	link_data_rate = (clk * bpp / 8) / link_nr;
-
-	/* calculate ratio of packed data rate to link symbol rate */
-	link_ratio = link_data_rate * symbol;
-	r = do_div(link_ratio, link_bw);
-
-	for (TU = 64; TU >= 32; TU--) {
-		/* calculate average number of valid symbols in each TU */
-		u32 tu_valid = link_ratio * TU;
-		u32 calc, diff;
-
-		/* find a hw representation for the fraction.. */
-		VTUi = tu_valid / symbol;
-		calc = VTUi * symbol;
-		diff = tu_valid - calc;
-		if (diff) {
-			if (diff >= (symbol / 2)) {
-				VTUf = symbol / (symbol - diff);
-				if (symbol - (VTUf * diff))
-					VTUf++;
-
-				if (VTUf <= 15) {
-					VTUa  = 1;
-					calc += symbol - (symbol / VTUf);
-				} else {
-					VTUa  = 0;
-					VTUf  = 1;
-					calc += symbol;
-				}
-			} else {
-				VTUa  = 0;
-				VTUf  = min((int)(symbol / diff), 15);
-				calc += symbol / VTUf;
-			}
-
-			diff = calc - tu_valid;
-		} else {
-			/* no remainder, but the hw doesn't like the fractional
-			 * part to be zero.  decrement the integer part and
-			 * have the fraction add a whole symbol back
-			 */
-			VTUa = 0;
-			VTUf = 1;
-			VTUi--;
-		}
-
-		if (diff < best_diff) {
-			best_diff = diff;
-			bestTU = TU;
-			bestVTUa = VTUa;
-			bestVTUf = VTUf;
-			bestVTUi = VTUi;
-			if (diff == 0)
-				break;
-		}
-	}
-
-	if (!bestTU) {
-		NV_ERROR(drm, "DP: unable to find suitable config\n");
-		return;
-	}
-
-	/* XXX close to vbios numbers, but not right */
-	unk  = (symbol - link_ratio) * bestTU;
-	unk *= link_ratio;
-	r = do_div(unk, symbol);
-	r = do_div(unk, symbol);
-	unk += 6;
-
-	nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
-	nv_mask(device, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
-							     bestVTUf << 16 |
-							     bestVTUi << 8 |
-							     unk);
-}
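
A worked pass through the search above, with assumed figures (not taken from the patch): a 148500 kHz pixel clock at 24 bpp over four lanes of a 2.7 Gb/s link, i.e. link_bw = 270000 (270000 kB/s of payload per lane after 8b/10b coding):

    link_data_rate = 148500 * 24 / 8 / 4       = 111375 kB/s per lane
    link_ratio     = 111375 * 100000 / 270000  = 41250 (41.25% of 'symbol')

    TU = 56: tu_valid = 41250 * 56 = 2310000
             VTUi = 23, calc = 2300000, diff = 10000 (< symbol/2)
             VTUf = 100000 / 10000 = 10, calc += 100000 / 10  ->  diff = 0

Descending from 64, TU = 56 is the first exact fit: 23 + 1/10 valid symbols in every 56-symbol TU reproduces the 41.25% data/symbol ratio exactly, so the loop breaks with bestTU = 56, bestVTUi = 23, bestVTUf = 10, bestVTUa = 0.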
-static void
-nv50_sor_disconnect(struct drm_encoder *encoder)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-	struct drm_device *dev = encoder->dev;
-	struct nouveau_channel *evo = nv50_display(dev)->master;
-	int ret;
-
-	if (!nv_encoder->crtc)
-		return;
-	nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
-
-	NV_DEBUG(drm, "Disconnecting SOR %d\n", nv_encoder->or);
-
-	ret = RING_SPACE(evo, 4);
-	if (ret) {
-		NV_ERROR(drm, "no space while disconnecting SOR\n");
-		return;
-	}
-	BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
-	OUT_RING  (evo, 0);
-	BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-	OUT_RING  (evo, 0);
-
-	nouveau_hdmi_mode_set(encoder, NULL);
-
-	nv_encoder->crtc = NULL;
-	nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-}
-
-static void
-nv50_sor_dpms(struct drm_encoder *encoder, int mode)
-{
-	struct nouveau_device *device = nouveau_dev(encoder->dev);
-	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-	struct drm_device *dev = encoder->dev;
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct drm_encoder *enc;
-	uint32_t val;
-	int or = nv_encoder->or;
-
-	NV_DEBUG(drm, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode);
-
-	nv_encoder->last_dpms = mode;
-	list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
-		struct nouveau_encoder *nvenc = nouveau_encoder(enc);
-
-		if (nvenc == nv_encoder ||
-		    (nvenc->dcb->type != DCB_OUTPUT_TMDS &&
-		     nvenc->dcb->type != DCB_OUTPUT_LVDS &&
-		     nvenc->dcb->type != DCB_OUTPUT_DP) ||
-		    nvenc->dcb->or != nv_encoder->dcb->or)
-			continue;
-
-		if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
-			return;
-	}
-
-	/* wait for it to be done */
-	if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
-		     NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
-		NV_ERROR(drm, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
-		NV_ERROR(drm, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
-			 nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
-	}
-
-	val = nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or));
-
-	if (mode == DRM_MODE_DPMS_ON)
-		val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
-	else
-		val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
-
-	nv_wr32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
-		NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
-	if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(or),
-		     NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
-		NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
-		NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
-			 nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
-	}
-
-	if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
-		struct dp_train_func func = {
-			.link_set = nv50_sor_dp_link_set,
-			.train_set = nv50_sor_dp_train_set,
-			.train_adj = nv50_sor_dp_train_adj
-		};
-
-		nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
-	}
-}
-
-static void
-nv50_sor_save(struct drm_encoder *encoder)
-{
-	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-	NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_sor_restore(struct drm_encoder *encoder)
-{
-	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-	NV_ERROR(drm, "!!\n");
-}
-
-static bool
-nv50_sor_mode_fixup(struct drm_encoder *encoder,
-		    const struct drm_display_mode *mode,
-		    struct drm_display_mode *adjusted_mode)
-{
-	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_connector *connector;
-
-	NV_DEBUG(drm, "or %d\n", nv_encoder->or);
-
-	connector = nouveau_encoder_connector_get(nv_encoder);
-	if (!connector) {
-		NV_ERROR(drm, "Encoder has no connector\n");
-		return false;
-	}
-
-	if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
-	     connector->native_mode)
-		drm_mode_copy(adjusted_mode, connector->native_mode);
-
-	return true;
-}
-
-static void
-nv50_sor_prepare(struct drm_encoder *encoder)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	nv50_sor_disconnect(encoder);
-	if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
-		/* avoid race between link training and supervisor intr */
-		nv50_display_sync(encoder->dev);
-	}
-}
-
-static void
-nv50_sor_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
-		  struct drm_display_mode *mode)
-{
-	struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
-	struct nouveau_connector *nv_connector;
-	uint32_t mode_ctl = 0;
-	int ret;
-
-	NV_DEBUG(drm, "or %d type %d -> crtc %d\n",
-		     nv_encoder->or, nv_encoder->dcb->type, crtc->index);
-	nv_encoder->crtc = encoder->crtc;
-
-	switch (nv_encoder->dcb->type) {
-	case DCB_OUTPUT_TMDS:
-		if (nv_encoder->dcb->sorconf.link & 1) {
-			if (mode->clock < 165000)
-				mode_ctl = 0x0100;
-			else
-				mode_ctl = 0x0500;
-		} else
-			mode_ctl = 0x0200;
-
-		nouveau_hdmi_mode_set(encoder, mode);
-		break;
-	case DCB_OUTPUT_DP:
-		nv_connector = nouveau_encoder_connector_get(nv_encoder);
-		if (nv_connector && nv_connector->base.display_info.bpc == 6) {
-			nv_encoder->dp.datarate = mode->clock * 18 / 8;
-			mode_ctl |= 0x00020000;
-		} else {
-			nv_encoder->dp.datarate = mode->clock * 24 / 8;
-			mode_ctl |= 0x00050000;
-		}
-
-		if (nv_encoder->dcb->sorconf.link & 1)
-			mode_ctl |= 0x00000800;
-		else
-			mode_ctl |= 0x00000900;
-		break;
-	default:
-		break;
-	}
-
-	if (crtc->index == 1)
-		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1;
-	else
-		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
-
-	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
-
-	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
-
-	nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
-
-	ret = RING_SPACE(evo, 2);
-	if (ret) {
-		NV_ERROR(drm, "no space while connecting SOR\n");
-		nv_encoder->crtc = NULL;
-		return;
-	}
-	BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
-	OUT_RING(evo, mode_ctl);
-}
-
-static struct drm_crtc *
-nv50_sor_crtc_get(struct drm_encoder *encoder)
-{
-	return nouveau_encoder(encoder)->crtc;
-}
-
-static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
-	.dpms = nv50_sor_dpms,
-	.save = nv50_sor_save,
-	.restore = nv50_sor_restore,
-	.mode_fixup = nv50_sor_mode_fixup,
-	.prepare = nv50_sor_prepare,
-	.commit = nv50_sor_commit,
-	.mode_set = nv50_sor_mode_set,
-	.get_crtc = nv50_sor_crtc_get,
-	.detect = NULL,
-	.disable = nv50_sor_disconnect
-};
-
-static void
-nv50_sor_destroy(struct drm_encoder *encoder)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-
-	NV_DEBUG(drm, "\n");
-
-	drm_encoder_cleanup(encoder);
-
-	kfree(nv_encoder);
-}
-
-static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
-	.destroy = nv50_sor_destroy,
-};
-
-int
-nv50_sor_create(struct drm_connector *connector, struct dcb_output *entry)
-{
-	struct nouveau_encoder *nv_encoder = NULL;
-	struct drm_device *dev = connector->dev;
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct drm_encoder *encoder;
-	int type;
-
-	NV_DEBUG(drm, "\n");
-
-	switch (entry->type) {
-	case DCB_OUTPUT_TMDS:
-	case DCB_OUTPUT_DP:
-		type = DRM_MODE_ENCODER_TMDS;
-		break;
-	case DCB_OUTPUT_LVDS:
-		type = DRM_MODE_ENCODER_LVDS;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
-	if (!nv_encoder)
-		return -ENOMEM;
-	encoder = to_drm_encoder(nv_encoder);
-
-	nv_encoder->dcb = entry;
-	nv_encoder->or = ffs(entry->or) - 1;
-	nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-
-	drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
-	drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);
-
-	encoder->possible_crtcs = entry->heads;
-	encoder->possible_clones = 0;
-
-	drm_mode_connector_attach_encoder(connector, encoder);
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 53299eac9676..2a56b1b551cb 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -114,17 +114,9 @@ nvc0_fence_context_del(struct nouveau_channel *chan)
 	struct nvc0_fence_chan *fctx = chan->fence;
 	int i;
 
-	if (nv_device(chan->drm->device)->card_type >= NV_D0) {
-		for (i = 0; i < dev->mode_config.num_crtc; i++) {
-			struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
-			nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
-		}
-	} else
-	if (nv_device(chan->drm->device)->card_type >= NV_50) {
-		for (i = 0; i < dev->mode_config.num_crtc; i++) {
-			struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
-			nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
-		}
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+		nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
 	}
 
 	nouveau_bo_vma_del(priv->bo, &fctx->vma);
@@ -154,12 +146,7 @@ nvc0_fence_context_new(struct nouveau_channel *chan)
 
 	/* map display semaphore buffers into channel's vm */
 	for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
-		struct nouveau_bo *bo;
-		if (nv_device(chan->drm->device)->card_type >= NV_D0)
-			bo = nvd0_display_crtc_sema(chan->drm->dev, i);
-		else
-			bo = nv50_display_crtc_sema(chan->drm->dev, i);
-
+		struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
 		ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
 	}
 
@@ -203,6 +190,8 @@ nvc0_fence_destroy(struct nouveau_drm *drm)
 {
 	struct nvc0_fence_priv *priv = drm->fence;
 	nouveau_bo_unmap(priv->bo);
+	if (priv->bo)
+		nouveau_bo_unpin(priv->bo);
 	nouveau_bo_ref(NULL, &priv->bo);
 	drm->fence = NULL;
 	kfree(priv);
@@ -232,8 +221,11 @@ nvc0_fence_create(struct nouveau_drm *drm)
 			     TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
 	if (ret == 0) {
 		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
-		if (ret == 0)
+		if (ret == 0) {
 			ret = nouveau_bo_map(priv->bo);
+			if (ret)
+				nouveau_bo_unpin(priv->bo);
+		}
 		if (ret)
 			nouveau_bo_ref(NULL, &priv->bo);
 	}
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
deleted file mode 100644
index c402fca2b2b8..000000000000
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ /dev/null
@@ -1,2141 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/dma-mapping.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_gem.h"
-#include "nouveau_connector.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-#include "nouveau_fence.h"
-#include "nv50_display.h"
-
-#include <core/gpuobj.h>
-
-#include <subdev/timer.h>
-#include <subdev/bar.h>
-#include <subdev/fb.h>
-
-#define EVO_DMA_NR 9
-
-#define EVO_MASTER  (0x00)
-#define EVO_FLIP(c) (0x01 + (c))
-#define EVO_OVLY(c) (0x05 + (c))
-#define EVO_OIMM(c) (0x09 + (c))
-#define EVO_CURS(c) (0x0d + (c))
-
-/* offsets in shared sync bo of various structures */
-#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
-#define EVO_MAST_NTFY     EVO_SYNC(  0, 0x00)
-#define EVO_FLIP_SEM0(c)  EVO_SYNC((c), 0x00)
-#define EVO_FLIP_SEM1(c)  EVO_SYNC((c), 0x10)
-
-struct evo {
-	int idx;
-	dma_addr_t handle;
-	u32 *ptr;
-	struct {
-		u32 offset;
-		u16 value;
-	} sem;
-};
-
-struct nvd0_display {
-	struct nouveau_gpuobj *mem;
-	struct nouveau_bo *sync;
-	struct evo evo[9];
-
-	struct tasklet_struct tasklet;
-	u32 modeset;
-};
-
-static struct nvd0_display *
-nvd0_display(struct drm_device *dev)
-{
-	return nouveau_display(dev)->priv;
-}
-
-static struct drm_crtc *
-nvd0_display_crtc_get(struct drm_encoder *encoder)
-{
-	return nouveau_encoder(encoder)->crtc;
-}
-
-/******************************************************************************
- * EVO channel helpers
- *****************************************************************************/
-static inline int
-evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	int ret = 0;
-	nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
-	nv_wr32(device, 0x610704 + (id * 0x10), data);
-	nv_mask(device, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
-	if (!nv_wait(device, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
-		ret = -EBUSY;
-	nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
-	return ret;
-}
-
-static u32 *
-evo_wait(struct drm_device *dev, int id, int nr)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvd0_display *disp = nvd0_display(dev);
-	u32 put = nv_rd32(device, 0x640000 + (id * 0x1000)) / 4;
-
-	if (put + nr >= (PAGE_SIZE / 4)) {
-		disp->evo[id].ptr[put] = 0x20000000;
-
-		nv_wr32(device, 0x640000 + (id * 0x1000), 0x00000000);
-		if (!nv_wait(device, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
-			NV_ERROR(drm, "evo %d dma stalled\n", id);
-			return NULL;
-		}
-
-		put = 0;
-	}
-
-	return disp->evo[id].ptr + put;
-}
-
-static void
-evo_kick(u32 *push, struct drm_device *dev, int id)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nvd0_display *disp = nvd0_display(dev);
-
-	nv_wr32(device, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
-}
-
-#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
-#define evo_data(p,d)   *((p)++) = (d)
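
The method header packs the dword count into the bits above the method offset, so for example

    evo_mthd(push, 0x0080, 1)   ->  *push++ = (1 << 18) | 0x0080 = 0x00040080
    evo_data(push, 0x00000000)  ->  *push++ = 0x00000000

emits a one-dword write to method 0x0080 followed by its payload; 0x0080 is the update/commit method this file kicks after most state changes (cf. NV50_EVO_UPDATE in the header removed above).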
-
-static int
-evo_init_dma(struct drm_device *dev, int ch)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvd0_display *disp = nvd0_display(dev);
-	u32 flags;
-
-	flags = 0x00000000;
-	if (ch == EVO_MASTER)
-		flags |= 0x01000000;
-
-	nv_wr32(device, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
-	nv_wr32(device, 0x610498 + (ch * 0x0010), 0x00010000);
-	nv_wr32(device, 0x61049c + (ch * 0x0010), 0x00000001);
-	nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
-	nv_wr32(device, 0x640000 + (ch * 0x1000), 0x00000000);
-	nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
-	if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
-		NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
-			      nv_rd32(device, 0x610490 + (ch * 0x0010)));
-		return -EBUSY;
-	}
-
-	nv_mask(device, 0x610090, (1 << ch), (1 << ch));
-	nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
-	return 0;
-}
-
-static void
-evo_fini_dma(struct drm_device *dev, int ch)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-
-	if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000010))
-		return;
-
-	nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
-	nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
-	nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
-	nv_mask(device, 0x610090, (1 << ch), 0x00000000);
-	nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
-}
-
-static inline void
-evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	nv_wr32(device, 0x640000 + (ch * 0x1000) + mthd, data);
-}
-
-static int
-evo_init_pio(struct drm_device *dev, int ch)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000001);
-	if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
-		NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
-			      nv_rd32(device, 0x610490 + (ch * 0x0010)));
-		return -EBUSY;
-	}
-
-	nv_mask(device, 0x610090, (1 << ch), (1 << ch));
-	nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
-	return 0;
-}
-
-static void
-evo_fini_pio(struct drm_device *dev, int ch)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-
-	if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000001))
-		return;
-
-	nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
-	nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
-	nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
-	nv_mask(device, 0x610090, (1 << ch), 0x00000000);
-	nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
-}
-
-static bool
-evo_sync_wait(void *data)
-{
-	return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
-}
-
-static int
-evo_sync(struct drm_device *dev, int ch)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nvd0_display *disp = nvd0_display(dev);
-	u32 *push = evo_wait(dev, ch, 8);
-	if (push) {
-		nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
-		evo_mthd(push, 0x0084, 1);
-		evo_data(push, 0x80000000 | EVO_MAST_NTFY);
-		evo_mthd(push, 0x0080, 2);
-		evo_data(push, 0x00000000);
-		evo_data(push, 0x00000000);
-		evo_kick(push, dev, ch);
-		if (nv_wait_cb(device, evo_sync_wait, disp->sync))
-			return 0;
-	}
-
-	return -EBUSY;
-}
-
-/******************************************************************************
- * Page flipping channel
- *****************************************************************************/
-struct nouveau_bo *
-nvd0_display_crtc_sema(struct drm_device *dev, int crtc)
-{
-	return nvd0_display(dev)->sync;
-}
-
-void
-nvd0_display_flip_stop(struct drm_crtc *crtc)
-{
-	struct nvd0_display *disp = nvd0_display(crtc->dev);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
-	u32 *push;
-
-	push = evo_wait(crtc->dev, evo->idx, 8);
-	if (push) {
-		evo_mthd(push, 0x0084, 1);
-		evo_data(push, 0x00000000);
-		evo_mthd(push, 0x0094, 1);
-		evo_data(push, 0x00000000);
-		evo_mthd(push, 0x00c0, 1);
-		evo_data(push, 0x00000000);
-		evo_mthd(push, 0x0080, 1);
-		evo_data(push, 0x00000000);
-		evo_kick(push, crtc->dev, evo->idx);
-	}
-}
-
-int
-nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-		       struct nouveau_channel *chan, u32 swap_interval)
-{
-	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
-	struct nvd0_display *disp = nvd0_display(crtc->dev);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
-	u64 offset;
-	u32 *push;
-	int ret;
-
-	swap_interval <<= 4;
-	if (swap_interval == 0)
-		swap_interval |= 0x100;
-
-	push = evo_wait(crtc->dev, evo->idx, 128);
-	if (unlikely(push == NULL))
-		return -EBUSY;
-
-	/* synchronise with the rendering channel, if necessary */
-	if (likely(chan)) {
-		ret = RING_SPACE(chan, 10);
-		if (ret)
-			return ret;
-
-		offset  = nvc0_fence_crtc(chan, nv_crtc->index);
-		offset += evo->sem.offset;
-
-		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING  (chan, upper_32_bits(offset));
-		OUT_RING  (chan, lower_32_bits(offset));
-		OUT_RING  (chan, 0xf00d0000 | evo->sem.value);
-		OUT_RING  (chan, 0x1002);
-		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING  (chan, upper_32_bits(offset));
-		OUT_RING  (chan, lower_32_bits(offset ^ 0x10));
-		OUT_RING  (chan, 0x74b1e000);
-		OUT_RING  (chan, 0x1001);
-		FIRE_RING (chan);
-	} else {
-		nouveau_bo_wr32(disp->sync, evo->sem.offset / 4,
-				0xf00d0000 | evo->sem.value);
-		evo_sync(crtc->dev, EVO_MASTER);
-	}
-
-	/* queue the flip */
-	evo_mthd(push, 0x0100, 1);
-	evo_data(push, 0xfffe0000);
-	evo_mthd(push, 0x0084, 1);
-	evo_data(push, swap_interval);
-	if (!(swap_interval & 0x00000100)) {
-		evo_mthd(push, 0x00e0, 1);
-		evo_data(push, 0x40000000);
-	}
-	evo_mthd(push, 0x0088, 4);
-	evo_data(push, evo->sem.offset);
-	evo_data(push, 0xf00d0000 | evo->sem.value);
-	evo_data(push, 0x74b1e000);
-	evo_data(push, NvEvoSync);
-	evo_mthd(push, 0x00a0, 2);
-	evo_data(push, 0x00000000);
-	evo_data(push, 0x00000000);
-	evo_mthd(push, 0x00c0, 1);
-	evo_data(push, nv_fb->r_dma);
-	evo_mthd(push, 0x0110, 2);
-	evo_data(push, 0x00000000);
-	evo_data(push, 0x00000000);
-	evo_mthd(push, 0x0400, 5);
-	evo_data(push, nv_fb->nvbo->bo.offset >> 8);
-	evo_data(push, 0);
-	evo_data(push, (fb->height << 16) | fb->width);
-	evo_data(push, nv_fb->r_pitch);
-	evo_data(push, nv_fb->r_format);
-	evo_mthd(push, 0x0080, 1);
-	evo_data(push, 0x00000000);
-	evo_kick(push, crtc->dev, evo->idx);
-
-	evo->sem.offset ^= 0x10;
-	evo->sem.value++;
-	return 0;
-}
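
Reading the two paths above together: 0xf00d0000 | value is the "flip armed" token, written either by the render channel (semaphore trigger 0x1002) or directly by the CPU in the channel-less case, and 0x74b1e000 is the "flip done" token the render channel then blocks on (trigger 0x1001) at the partner word 0x10 bytes away. The final two lines ping-pong sem.offset between the EVO_FLIP_SEM0/SEM1 words and bump the 16-bit value, so a stale completion can never satisfy the wait armed for the next flip. (The token meanings are inferred from the code; the patch itself doesn't name them.)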
-
-/******************************************************************************
- * CRTC
- *****************************************************************************/
-static int
-nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
-{
-	struct nouveau_drm *drm = nouveau_drm(nv_crtc->base.dev);
-	struct drm_device *dev = nv_crtc->base.dev;
-	struct nouveau_connector *nv_connector;
-	struct drm_connector *connector;
-	u32 *push, mode = 0x00;
-	u32 mthd;
-
-	nv_connector = nouveau_crtc_connector_get(nv_crtc);
-	connector = &nv_connector->base;
-	if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
-		if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
-			mode = DITHERING_MODE_DYNAMIC2X2;
-	} else {
-		mode = nv_connector->dithering_mode;
-	}
-
-	if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
-		if (connector->display_info.bpc >= 8)
-			mode |= DITHERING_DEPTH_8BPC;
-	} else {
-		mode |= nv_connector->dithering_depth;
-	}
-
-	if (nv_device(drm->device)->card_type < NV_E0)
-		mthd = 0x0490 + (nv_crtc->index * 0x0300);
-	else
-		mthd = 0x04a0 + (nv_crtc->index * 0x0300);
-
-	push = evo_wait(dev, EVO_MASTER, 4);
-	if (push) {
-		evo_mthd(push, mthd, 1);
-		evo_data(push, mode);
-		if (update) {
-			evo_mthd(push, 0x0080, 1);
-			evo_data(push, 0x00000000);
-		}
-		evo_kick(push, dev, EVO_MASTER);
-	}
-
-	return 0;
-}
-
-static int
-nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
-{
-	struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
-	struct drm_device *dev = nv_crtc->base.dev;
-	struct drm_crtc *crtc = &nv_crtc->base;
-	struct nouveau_connector *nv_connector;
-	int mode = DRM_MODE_SCALE_NONE;
-	u32 oX, oY, *push;
-
-	/* start off at the resolution we programmed the crtc for; this
-	 * effectively handles NONE/FULL scaling
-	 */
-	nv_connector = nouveau_crtc_connector_get(nv_crtc);
-	if (nv_connector && nv_connector->native_mode)
-		mode = nv_connector->scaling_mode;
-
-	if (mode != DRM_MODE_SCALE_NONE)
-		omode = nv_connector->native_mode;
-	else
-		omode = umode;
-
-	oX = omode->hdisplay;
-	oY = omode->vdisplay;
-	if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
-		oY *= 2;
-
-	/* add overscan compensation if necessary; this keeps the aspect
-	 * ratio the same as the backend mode unless the user overrides it
-	 * by setting both the hborder and vborder properties.
-	 */
-	if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
-			     (nv_connector->underscan == UNDERSCAN_AUTO &&
-			      nv_connector->edid &&
-			      drm_detect_hdmi_monitor(nv_connector->edid)))) {
-		u32 bX = nv_connector->underscan_hborder;
-		u32 bY = nv_connector->underscan_vborder;
-		u32 aspect = (oY << 19) / oX;
-
-		if (bX) {
-			oX -= (bX * 2);
-			if (bY) oY -= (bY * 2);
-			else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
-		} else {
-			oX -= (oX >> 4) + 32;
-			if (bY) oY -= (bY * 2);
-			else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
-		}
-	}
-
-	/* handle CENTER/ASPECT scaling, taking into account the areas
-	 * removed already for overscan compensation
-	 */
-	switch (mode) {
-	case DRM_MODE_SCALE_CENTER:
-		oX = min((u32)umode->hdisplay, oX);
-		oY = min((u32)umode->vdisplay, oY);
-		/* fall-through */
-	case DRM_MODE_SCALE_ASPECT:
-		if (oY < oX) {
-			u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
-			oX = ((oY * aspect) + (aspect / 2)) >> 19;
-		} else {
-			u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
-			oY = ((oX * aspect) + (aspect / 2)) >> 19;
-		}
-		break;
-	default:
-		break;
-	}
-
-	push = evo_wait(dev, EVO_MASTER, 8);
-	if (push) {
-		evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
-		evo_data(push, (oY << 16) | oX);
-		evo_data(push, (oY << 16) | oX);
-		evo_data(push, (oY << 16) | oX);
-		evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
-		evo_data(push, 0x00000000);
-		evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
-		evo_data(push, (umode->vdisplay << 16) | umode->hdisplay);
-		evo_kick(push, dev, EVO_MASTER);
-		if (update) {
-			nvd0_display_flip_stop(crtc);
-			nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
-		}
-	}
-
-	return 0;
-}
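
The border arithmetic above is 19-bit fixed point with a half-step rounding term. With an assumed 1920x1080 backend mode, automatic underscan and no explicit borders:

    aspect = (1080 << 19) / 1920               = 294912   (0.5625)
    oX     = 1920 - ((1920 >> 4) + 32)         = 1768
    oY     = ((1768 * 294912) + 147456) >> 19  = 994

so the picture is shrunk to 1768x994, within half a line of the exact 16:9 value of 994.5.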
-
-static int
-nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
-		    int x, int y, bool update)
-{
-	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
-	u32 *push;
-
-	push = evo_wait(fb->dev, EVO_MASTER, 16);
-	if (push) {
-		evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
-		evo_data(push, nvfb->nvbo->bo.offset >> 8);
-		evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
-		evo_data(push, (fb->height << 16) | fb->width);
-		evo_data(push, nvfb->r_pitch);
-		evo_data(push, nvfb->r_format);
-		evo_data(push, nvfb->r_dma);
-		evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
-		evo_data(push, (y << 16) | x);
-		if (update) {
-			evo_mthd(push, 0x0080, 1);
-			evo_data(push, 0x00000000);
-		}
-		evo_kick(push, fb->dev, EVO_MASTER);
-	}
-
-	nv_crtc->fb.tile_flags = nvfb->r_dma;
-	return 0;
-}
-
-static void
-nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
-{
-	struct drm_device *dev = nv_crtc->base.dev;
-	u32 *push = evo_wait(dev, EVO_MASTER, 16);
-	if (push) {
-		if (show) {
-			evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
-			evo_data(push, 0x85000000);
-			evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
-			evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
-			evo_data(push, NvEvoVRAM);
-		} else {
-			evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
-			evo_data(push, 0x05000000);
-			evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
-			evo_data(push, 0x00000000);
-		}
-
-		if (update) {
-			evo_mthd(push, 0x0080, 1);
-			evo_data(push, 0x00000000);
-		}
-
-		evo_kick(push, dev, EVO_MASTER);
-	}
-}
-
-static void
-nvd0_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-}
-
-static void
-nvd0_crtc_prepare(struct drm_crtc *crtc)
-{
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	u32 *push;
-
-	nvd0_display_flip_stop(crtc);
-
-	push = evo_wait(crtc->dev, EVO_MASTER, 2);
-	if (push) {
-		evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
-		evo_data(push, 0x00000000);
-		evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
-		evo_data(push, 0x03000000);
-		evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
-		evo_data(push, 0x00000000);
-		evo_kick(push, crtc->dev, EVO_MASTER);
-	}
-
-	nvd0_crtc_cursor_show(nv_crtc, false, false);
-}
-
-static void
-nvd0_crtc_commit(struct drm_crtc *crtc)
-{
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	u32 *push;
-
-	push = evo_wait(crtc->dev, EVO_MASTER, 32);
-	if (push) {
-		evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
-		evo_data(push, nv_crtc->fb.tile_flags);
-		evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
-		evo_data(push, 0x83000000);
-		evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
-		evo_data(push, 0x00000000);
-		evo_data(push, 0x00000000);
-		evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
-		evo_data(push, NvEvoVRAM);
-		evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
-		evo_data(push, 0xffffff00);
-		evo_kick(push, crtc->dev, EVO_MASTER);
-	}
-
-	nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
-	nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
-}
-
-static bool
-nvd0_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
-		     struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
-static int
-nvd0_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
-{
-	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
-	int ret;
-
-	ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
-	if (ret)
-		return ret;
-
-	if (old_fb) {
-		nvfb = nouveau_framebuffer(old_fb);
-		nouveau_bo_unpin(nvfb->nvbo);
-	}
-
-	return 0;
-}
-
-static int
-nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
-		   struct drm_display_mode *mode, int x, int y,
-		   struct drm_framebuffer *old_fb)
-{
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct nouveau_connector *nv_connector;
-	u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
-	u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
-	u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
-	u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
-	u32 vblan2e = 0, vblan2s = 1;
-	u32 *push;
-	int ret;
-
-	hactive = mode->htotal;
-	hsynce  = mode->hsync_end - mode->hsync_start - 1;
-	hbackp  = mode->htotal - mode->hsync_end;
-	hblanke = hsynce + hbackp;
-	hfrontp = mode->hsync_start - mode->hdisplay;
-	hblanks = mode->htotal - hfrontp - 1;
-
-	vactive = mode->vtotal * vscan / ilace;
-	vsynce  = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
-	vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
-	vblanke = vsynce + vbackp;
-	vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
-	vblanks = vactive - vfrontp - 1;
-	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
-		vblan2e = vactive + vsynce + vbackp;
-		vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
-		vactive = (vactive * 2) + 1;
-	}
-
-	ret = nvd0_crtc_swap_fbs(crtc, old_fb);
-	if (ret)
-		return ret;
-
-	push = evo_wait(crtc->dev, EVO_MASTER, 64);
-	if (push) {
-		evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
-		evo_data(push, 0x00000000);
-		evo_data(push, (vactive << 16) | hactive);
-		evo_data(push, ( vsynce << 16) | hsynce);
-		evo_data(push, (vblanke << 16) | hblanke);
-		evo_data(push, (vblanks << 16) | hblanks);
-		evo_data(push, (vblan2e << 16) | vblan2s);
-		evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
-		evo_data(push, 0x00000000); /* ??? */
-		evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
-		evo_data(push, mode->clock * 1000);
-		evo_data(push, 0x00200000); /* ??? */
-		evo_data(push, mode->clock * 1000);
-		evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
-		evo_data(push, 0x00000311);
-		evo_data(push, 0x00000100);
-		evo_kick(push, crtc->dev, EVO_MASTER);
-	}
-
-	nv_connector = nouveau_crtc_connector_get(nv_crtc);
-	nvd0_crtc_set_dither(nv_crtc, false);
-	nvd0_crtc_set_scale(nv_crtc, false);
-	nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
-	return 0;
-}
-
-static int
-nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-			struct drm_framebuffer *old_fb)
-{
-	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	int ret;
-
-	if (!crtc->fb) {
-		NV_DEBUG(drm, "No FB bound\n");
-		return 0;
-	}
-
-	ret = nvd0_crtc_swap_fbs(crtc, old_fb);
-	if (ret)
-		return ret;
-
-	nvd0_display_flip_stop(crtc);
-	nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
-	nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
-	return 0;
-}
-
-static int
-nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
-			       struct drm_framebuffer *fb, int x, int y,
-			       enum mode_set_atomic state)
-{
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	nvd0_display_flip_stop(crtc);
-	nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
-	return 0;
-}
-
-static void
-nvd0_crtc_lut_load(struct drm_crtc *crtc)
-{
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
-	int i;
-
-	for (i = 0; i < 256; i++) {
-		writew(0x6000 + (nv_crtc->lut.r[i] >> 2), lut + (i * 0x20) + 0);
-		writew(0x6000 + (nv_crtc->lut.g[i] >> 2), lut + (i * 0x20) + 2);
-		writew(0x6000 + (nv_crtc->lut.b[i] >> 2), lut + (i * 0x20) + 4);
-	}
-}
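
The loop above also documents the LUT layout: 256 entries with a 0x20-byte stride, and 16-bit red/green/blue words at byte offsets 0, 2 and 4 of each entry. Since the stored gamma values are 16-bit, 0x6000 + (val >> 2) maps them onto what appears to be a 14-bit range biased at 0x6000 (an inference from the code, not something the patch states):

    val = 0x0000  ->  writew(0x6000, ...)
    val = 0xffff  ->  writew(0x6000 + 0x3fff, ...) = 0x9fff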
-
-static int
-nvd0_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
-		     uint32_t handle, uint32_t width, uint32_t height)
-{
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct drm_device *dev = crtc->dev;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *nvbo;
-	bool visible = (handle != 0);
-	int i, ret = 0;
-
-	if (visible) {
-		if (width != 64 || height != 64)
-			return -EINVAL;
-
-		gem = drm_gem_object_lookup(dev, file_priv, handle);
-		if (unlikely(!gem))
-			return -ENOENT;
-		nvbo = nouveau_gem_object(gem);
-
-		ret = nouveau_bo_map(nvbo);
-		if (ret == 0) {
-			for (i = 0; i < 64 * 64; i++) {
-				u32 v = nouveau_bo_rd32(nvbo, i);
-				nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
-			}
-			nouveau_bo_unmap(nvbo);
-		}
-
-		drm_gem_object_unreference_unlocked(gem);
-	}
-
-	if (visible != nv_crtc->cursor.visible) {
-		nvd0_crtc_cursor_show(nv_crtc, visible, true);
-		nv_crtc->cursor.visible = visible;
-	}
-
-	return ret;
-}
-
-static int
-nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	int ch = EVO_CURS(nv_crtc->index);
-
-	evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff));
-	evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
-	return 0;
-}
-
-static void
-nvd0_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
-		    uint32_t start, uint32_t size)
-{
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	u32 end = max(start + size, (u32)256);
-	u32 i;
-
-	for (i = start; i < end; i++) {
-		nv_crtc->lut.r[i] = r[i];
-		nv_crtc->lut.g[i] = g[i];
-		nv_crtc->lut.b[i] = b[i];
-	}
-
-	nvd0_crtc_lut_load(crtc);
-}
-
-static void
-nvd0_crtc_destroy(struct drm_crtc *crtc)
-{
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
-	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
-	nouveau_bo_unmap(nv_crtc->lut.nvbo);
-	nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
-	drm_crtc_cleanup(crtc);
-	kfree(crtc);
-}
-
-static const struct drm_crtc_helper_funcs nvd0_crtc_hfunc = {
-	.dpms = nvd0_crtc_dpms,
-	.prepare = nvd0_crtc_prepare,
-	.commit = nvd0_crtc_commit,
-	.mode_fixup = nvd0_crtc_mode_fixup,
-	.mode_set = nvd0_crtc_mode_set,
-	.mode_set_base = nvd0_crtc_mode_set_base,
-	.mode_set_base_atomic = nvd0_crtc_mode_set_base_atomic,
-	.load_lut = nvd0_crtc_lut_load,
-};
-
-static const struct drm_crtc_funcs nvd0_crtc_func = {
-	.cursor_set = nvd0_crtc_cursor_set,
-	.cursor_move = nvd0_crtc_cursor_move,
-	.gamma_set = nvd0_crtc_gamma_set,
-	.set_config = drm_crtc_helper_set_config,
-	.destroy = nvd0_crtc_destroy,
-	.page_flip = nouveau_crtc_page_flip,
-};
-
-static void
-nvd0_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
-{
-}
-
-static void
-nvd0_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
-{
-}
-
-static int
-nvd0_crtc_create(struct drm_device *dev, int index)
-{
-	struct nouveau_crtc *nv_crtc;
-	struct drm_crtc *crtc;
-	int ret, i;
-
-	nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
-	if (!nv_crtc)
-		return -ENOMEM;
-
-	nv_crtc->index = index;
-	nv_crtc->set_dither = nvd0_crtc_set_dither;
-	nv_crtc->set_scale = nvd0_crtc_set_scale;
-	nv_crtc->cursor.set_offset = nvd0_cursor_set_offset;
-	nv_crtc->cursor.set_pos = nvd0_cursor_set_pos;
-	for (i = 0; i < 256; i++) {
-		nv_crtc->lut.r[i] = i << 8;
-		nv_crtc->lut.g[i] = i << 8;
-		nv_crtc->lut.b[i] = i << 8;
-	}
-
-	crtc = &nv_crtc->base;
-	drm_crtc_init(dev, crtc, &nvd0_crtc_func);
-	drm_crtc_helper_add(crtc, &nvd0_crtc_hfunc);
-	drm_mode_crtc_set_gamma_size(crtc, 256);
-
-	ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
-	if (!ret) {
-		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
-		if (!ret)
-			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
-		if (ret)
-			nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
-	}
-
-	if (ret)
-		goto out;
-
-	ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, NULL, &nv_crtc->lut.nvbo);
-	if (!ret) {
-		ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
-		if (!ret)
-			ret = nouveau_bo_map(nv_crtc->lut.nvbo);
-		if (ret)
-			nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
-	}
-
-	if (ret)
-		goto out;
-
-	nvd0_crtc_lut_load(crtc);
-
-out:
-	if (ret)
-		nvd0_crtc_destroy(crtc);
-	return ret;
-}
-
-/******************************************************************************
- * DAC
- *****************************************************************************/
-static void
-nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct drm_device *dev = encoder->dev;
-	struct nouveau_device *device = nouveau_dev(dev);
-	int or = nv_encoder->or;
-	u32 dpms_ctrl;
-
-	dpms_ctrl = 0x80000000;
-	if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
-		dpms_ctrl |= 0x00000001;
-	if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
-		dpms_ctrl |= 0x00000004;
-
-	nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
-	nv_mask(device, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl);
-	nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
-}
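
The DAC control word is built from two independent sync kills: bit 0 drops hsync for STANDBY and OFF, bit 2 drops vsync for SUSPEND and OFF, and bit 31 marks the update as pending (hence the nv_wait on that bit before and after the nv_mask). A table-style sketch of the encoding:

#include <stdint.h>
#include <stdio.h>

/* DPMS mode numbering as in the DRM uapi: ON=0, STANDBY=1,
 * SUSPEND=2, OFF=3.
 */
enum { DPMS_ON, DPMS_STANDBY, DPMS_SUSPEND, DPMS_OFF };

/* Control-word selection from nvd0_dac_dpms(): bit 0 kills hsync,
 * bit 2 kills vsync, bit 31 flags the update as pending.
 */
static uint32_t dac_dpms_ctrl(int mode)
{
	uint32_t ctrl = 0x80000000;

	if (mode == DPMS_STANDBY || mode == DPMS_OFF)
		ctrl |= 0x00000001;
	if (mode == DPMS_SUSPEND || mode == DPMS_OFF)
		ctrl |= 0x00000004;
	return ctrl;
}

int main(void)
{
	int m;

	for (m = DPMS_ON; m <= DPMS_OFF; m++)
		printf("mode %d -> 0x%08x\n", m, dac_dpms_ctrl(m));
	return 0;
}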
-
-static bool
-nvd0_dac_mode_fixup(struct drm_encoder *encoder,
-		    const struct drm_display_mode *mode,
-		    struct drm_display_mode *adjusted_mode)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_connector *nv_connector;
-
-	nv_connector = nouveau_encoder_connector_get(nv_encoder);
-	if (nv_connector && nv_connector->native_mode) {
-		if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
-			int id = adjusted_mode->base.id;
-			*adjusted_mode = *nv_connector->native_mode;
-			adjusted_mode->base.id = id;
-		}
-	}
-
-	return true;
-}
-
-static void
-nvd0_dac_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
-		  struct drm_display_mode *adjusted_mode)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
-	u32 syncs, magic, *push;
-
-	syncs = 0x00000001;
-	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-		syncs |= 0x00000008;
-	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-		syncs |= 0x00000010;
-
-	magic = 0x31ec6000 | (nv_crtc->index << 25);
-	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-		magic |= 0x00000001;
-
-	nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);
-
-	push = evo_wait(encoder->dev, EVO_MASTER, 8);
-	if (push) {
-		evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
-		evo_data(push, syncs);
-		evo_data(push, magic);
-		evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 2);
-		evo_data(push, 1 << nv_crtc->index);
-		evo_data(push, 0x00ff);
-		evo_kick(push, encoder->dev, EVO_MASTER);
-	}
-
-	nv_encoder->crtc = encoder->crtc;
-}
-
-static void
-nvd0_dac_disconnect(struct drm_encoder *encoder)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct drm_device *dev = encoder->dev;
-	u32 *push;
-
-	if (nv_encoder->crtc) {
-		nvd0_crtc_prepare(nv_encoder->crtc);
-
-		push = evo_wait(dev, EVO_MASTER, 4);
-		if (push) {
-			evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
-			evo_data(push, 0x00000000);
-			evo_mthd(push, 0x0080, 1);
-			evo_data(push, 0x00000000);
-			evo_kick(push, dev, EVO_MASTER);
-		}
-
-		nv_encoder->crtc = NULL;
-	}
-}
-
-static enum drm_connector_status
-nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
-	enum drm_connector_status status = connector_status_disconnected;
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct drm_device *dev = encoder->dev;
-	struct nouveau_device *device = nouveau_dev(dev);
-	int or = nv_encoder->or;
-	u32 load;
-
-	nv_wr32(device, 0x61a00c + (or * 0x800), 0x00100000);
-	udelay(9500);
-	nv_wr32(device, 0x61a00c + (or * 0x800), 0x80000000);
-
-	load = nv_rd32(device, 0x61a00c + (or * 0x800));
-	if ((load & 0x38000000) == 0x38000000)
-		status = connector_status_connected;
-
-	nv_wr32(device, 0x61a00c + (or * 0x800), 0x00000000);
-	return status;
-}
-
-static void
-nvd0_dac_destroy(struct drm_encoder *encoder)
-{
-	drm_encoder_cleanup(encoder);
-	kfree(encoder);
-}
-
-static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = {
-	.dpms = nvd0_dac_dpms,
-	.mode_fixup = nvd0_dac_mode_fixup,
-	.prepare = nvd0_dac_disconnect,
-	.commit = nvd0_dac_commit,
-	.mode_set = nvd0_dac_mode_set,
-	.disable = nvd0_dac_disconnect,
-	.get_crtc = nvd0_display_crtc_get,
-	.detect = nvd0_dac_detect
-};
-
-static const struct drm_encoder_funcs nvd0_dac_func = {
-	.destroy = nvd0_dac_destroy,
-};
-
-static int
-nvd0_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
-{
-	struct drm_device *dev = connector->dev;
-	struct nouveau_encoder *nv_encoder;
-	struct drm_encoder *encoder;
-
-	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
-	if (!nv_encoder)
-		return -ENOMEM;
-	nv_encoder->dcb = dcbe;
-	nv_encoder->or = ffs(dcbe->or) - 1;
-
-	encoder = to_drm_encoder(nv_encoder);
-	encoder->possible_crtcs = dcbe->heads;
-	encoder->possible_clones = 0;
-	drm_encoder_init(dev, encoder, &nvd0_dac_func, DRM_MODE_ENCODER_DAC);
-	drm_encoder_helper_add(encoder, &nvd0_dac_hfunc);
-
-	drm_mode_connector_attach_encoder(connector, encoder);
-	return 0;
-}
-
-/******************************************************************************
- * Audio
- *****************************************************************************/
-static void
-nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_connector *nv_connector;
-	struct drm_device *dev = encoder->dev;
-	struct nouveau_device *device = nouveau_dev(dev);
-	int i, or = nv_encoder->or * 0x30;
-
-	nv_connector = nouveau_encoder_connector_get(nv_encoder);
-	if (!drm_detect_monitor_audio(nv_connector->edid))
-		return;
-
-	nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000001);
-
-	drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
-	if (nv_connector->base.eld[0]) {
-		u8 *eld = nv_connector->base.eld;
-
-		for (i = 0; i < eld[2] * 4; i++)
-			nv_wr32(device, 0x10ec00 + or, (i << 8) | eld[i]);
-		for (i = eld[2] * 4; i < 0x60; i++)
-			nv_wr32(device, 0x10ec00 + or, (i << 8) | 0x00);
-
-		nv_mask(device, 0x10ec10 + or, 0x80000002, 0x80000002);
-	}
-}
-
-static void
-nvd0_audio_disconnect(struct drm_encoder *encoder)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct drm_device *dev = encoder->dev;
-	struct nouveau_device *device = nouveau_dev(dev);
-	int or = nv_encoder->or * 0x30;
-
-	nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000000);
-}
-
-/******************************************************************************
- * HDMI
- *****************************************************************************/
-static void
-nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
-	struct nouveau_connector *nv_connector;
-	struct drm_device *dev = encoder->dev;
-	struct nouveau_device *device = nouveau_dev(dev);
-	int head = nv_crtc->index * 0x800;
-	u32 rekey = 56; /* binary driver, and tegra constant */
-	u32 max_ac_packet;
-
-	nv_connector = nouveau_encoder_connector_get(nv_encoder);
-	if (!drm_detect_hdmi_monitor(nv_connector->edid))
-		return;
-
-	max_ac_packet  = mode->htotal - mode->hdisplay;
-	max_ac_packet -= rekey;
-	max_ac_packet -= 18; /* constant from tegra */
-	max_ac_packet /= 32;
-
-	/* AVI InfoFrame */
-	nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
-	nv_wr32(device, 0x61671c + head, 0x000d0282);
-	nv_wr32(device, 0x616720 + head, 0x0000006f);
-	nv_wr32(device, 0x616724 + head, 0x00000000);
-	nv_wr32(device, 0x616728 + head, 0x00000000);
-	nv_wr32(device, 0x61672c + head, 0x00000000);
-	nv_mask(device, 0x616714 + head, 0x00000001, 0x00000001);
-
-	/* ??? InfoFrame? */
-	nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
-	nv_wr32(device, 0x6167ac + head, 0x00000010);
-	nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000001);
-
-	/* HDMI_CTRL */
-	nv_mask(device, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
-						  max_ac_packet << 16);
-
-	/* NFI, audio doesn't work without it though.. */
-	nv_mask(device, 0x616548 + head, 0x00000070, 0x00000000);
-
-	nvd0_audio_mode_set(encoder, mode);
-}
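
The max_ac_packet computation above takes the horizontal blanking, subtracts the 56-cycle rekey window and the fixed 18-cycle allowance, and divides the remainder into 32-pixel audio-packet slots. Worked example for CEA 1080p60 (hdisplay 1920, htotal 2200): (280 - 56 - 18) / 32 = 6.

#include <stdio.h>

/* max_ac_packet as derived in nvd0_hdmi_mode_set(): audio packets
 * must fit in the horizontal blanking after the rekey window (56)
 * and an 18-cycle constant, in units of 32 pixels.
 */
static unsigned max_ac_packet(unsigned hdisplay, unsigned htotal)
{
	return (htotal - hdisplay - 56 - 18) / 32;
}

int main(void)
{
	/* CEA-861 1080p60: hdisplay 1920, htotal 2200 */
	printf("%u\n", max_ac_packet(1920, 2200));	/* prints 6 */
	return 0;
}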
-
-static void
-nvd0_hdmi_disconnect(struct drm_encoder *encoder)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
-	struct drm_device *dev = encoder->dev;
-	struct nouveau_device *device = nouveau_dev(dev);
-	int head = nv_crtc->index * 0x800;
-
-	nvd0_audio_disconnect(encoder);
-
-	nv_mask(device, 0x616798 + head, 0x40000000, 0x00000000);
-	nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
-	nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
-}
-
-/******************************************************************************
- * SOR
- *****************************************************************************/
-static inline u32
-nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
-{
-	static const u8 nvd0[] = { 16, 8, 0, 24 };
-	return nvd0[lane];
-}
-
-static void
-nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-	const u32 loff = (or * 0x800) + (link * 0x80);
-	nv_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
-}
-
-static void
-nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
-		      u8 lane, u8 swing, u8 preem)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-	const u32 loff = (or * 0x800) + (link * 0x80);
-	u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane);
-	u32 mask = 0x000000ff << shift;
-	u8 *table, *entry, *config = NULL;
-
-	switch (swing) {
-	case 0: preem += 0; break;
-	case 1: preem += 4; break;
-	case 2: preem += 7; break;
-	case 3: preem += 9; break;
-	}
-
-	table = nouveau_dp_bios_data(dev, dcb, &entry);
-	if (table) {
-		if (table[0] == 0x30) {
-			config  = entry + table[4];
-			config += table[5] * preem;
-		} else
-		if (table[0] == 0x40) {
-			config  = table + table[1];
-			config += table[2] * table[3];
-			config += table[6] * preem;
-		}
-	}
-
-	if (!config) {
-		NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
-		return;
-	}
-
-	nv_mask(device, 0x61c118 + loff, mask, config[1] << shift);
-	nv_mask(device, 0x61c120 + loff, mask, config[2] << shift);
-	nv_mask(device, 0x61c130 + loff, 0x0000ff00, config[3] << 8);
-	nv_mask(device, 0x61c13c + loff, 0x00000000, 0x00000000);
-}
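
The swing switch above is a cumulative row offset into the VBIOS drive-parameter table: DP allows 4 pre-emphasis levels at swing 0, 3 at swing 1, 2 at swing 2 and 1 at swing 3, which is where the offsets 0, 4, 7 and 9 appear to come from. Sketch of the resulting flat index:

#include <stdio.h>

/* Flat-table row selection from nvd0_sor_dp_train_adj(): entries are
 * stored swing-major with 4/3/2/1 valid pre-emphasis levels per
 * swing level, hence cumulative offsets 0, 4, 7, 9.
 */
static int dp_drive_index(int swing, int preem)
{
	static const int base[] = { 0, 4, 7, 9 };

	return base[swing] + preem;
}

int main(void)
{
	printf("swing 2, preem 1 -> row %d\n", dp_drive_index(2, 1));	/* 8 */
	return 0;
}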
-
-static void
-nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
-		     int link_nr, u32 link_bw, bool enhframe)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-	const u32 loff = (or * 0x800) + (link * 0x80);
-	const u32 soff = (or * 0x800);
-	u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & ~0x001f4000;
-	u32 clksor = nv_rd32(device, 0x612300 + soff) & ~0x007c0000;
-	u32 script = 0x0000, lane_mask = 0;
-	u8 *table, *entry;
-	int i;
-
-	link_bw /= 27000;
-
-	table = nouveau_dp_bios_data(dev, dcb, &entry);
-	if (table) {
-		if      (table[0] == 0x30) entry = ROMPTR(dev, entry[10]);
-		else if (table[0] == 0x40) entry = ROMPTR(dev, entry[9]);
-		else                       entry = NULL;
-
-		while (entry) {
-			if (entry[0] >= link_bw)
-				break;
-			entry += 3;
-		}
-
-		nouveau_bios_run_init_table(dev, script, dcb, crtc);
-	}
-
-	clksor |= link_bw << 18;
-	dpctrl |= ((1 << link_nr) - 1) << 16;
-	if (enhframe)
-		dpctrl |= 0x00004000;
-
-	for (i = 0; i < link_nr; i++)
-		lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3);
-
-	nv_wr32(device, 0x612300 + soff, clksor);
-	nv_wr32(device, 0x61c10c + loff, dpctrl);
-	nv_mask(device, 0x61c130 + loff, 0x0000000f, lane_mask);
-}
-
-static void
-nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_output *dcb,
-		     u32 *link_nr, u32 *link_bw)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-	const u32 loff = (or * 0x800) + (link * 0x80);
-	const u32 soff = (or * 0x800);
-	u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & 0x000f0000;
-	u32 clksor = nv_rd32(device, 0x612300 + soff);
-
-	if      (dpctrl > 0x00030000) *link_nr = 4;
-	else if (dpctrl > 0x00010000) *link_nr = 2;
-	else			      *link_nr = 1;
-
-	*link_bw  = (clksor & 0x007c0000) >> 18;
-	*link_bw *= 27000;
-}
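
Both link_set and link_get above treat bits 22:18 of the SOR clock-control register as the link rate in 27000 kHz units, so the three standard DP rates encode as 6, 10 and 20. Round-trip sketch:

#include <stdint.h>
#include <stdio.h>

/* Link-rate field shared by nvd0_sor_dp_link_set()/_get(): the rate
 * in kHz is stored in bits 22:18 as a multiple of 27000.
 */
static uint32_t clksor_encode(uint32_t khz)
{
	return (khz / 27000) << 18;
}

static uint32_t clksor_decode(uint32_t clksor)
{
	return ((clksor & 0x007c0000) >> 18) * 27000;
}

int main(void)
{
	uint32_t rates[] = { 162000, 270000, 540000 };
	int i;

	for (i = 0; i < 3; i++)
		printf("%u -> 0x%08x -> %u\n", rates[i],
		       clksor_encode(rates[i]),
		       clksor_decode(clksor_encode(rates[i])));
	return 0;
}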
-
-static void
-nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_output *dcb,
-		    u32 crtc, u32 datarate)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	const u32 symbol = 100000;
-	const u32 TU = 64;
-	u32 link_nr, link_bw;
-	u64 ratio, value;
-
-	nvd0_sor_dp_link_get(dev, dcb, &link_nr, &link_bw);
-
-	ratio  = datarate;
-	ratio *= symbol;
-	do_div(ratio, link_nr * link_bw);
-
-	value  = (symbol - ratio) * TU;
-	value *= ratio;
-	do_div(value, symbol);
-	do_div(value, symbol);
-
-	value += 5;
-	value |= 0x08000000;
-
-	nv_wr32(device, 0x616610 + (crtc * 0x800), value);
-}
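
The TU computation expresses link occupancy in 0.001% steps (symbol = 100000) of a 64-symbol transfer unit. Worked example: 1080p60 at 24bpp gives datarate = 148500 * 24 / 8 = 445500; on a 4-lane 270000 link the ratio is 445500 * 100000 / 1080000 = 41250 (41.25%), and the programmed low bits come out as 15 plus the bias of 5, i.e. 20. A userspace rendition with plain 64-bit division standing in for do_div:

#include <stdint.h>
#include <stdio.h>

/* Fixed-point math from nvd0_sor_dp_calc_tu(), with ordinary u64
 * division in place of do_div(). Units match the driver's: datarate
 * and link_bw are both derived from kHz clocks.
 */
static uint32_t dp_tu_value(uint32_t datarate, uint32_t link_nr,
			    uint32_t link_bw)
{
	const uint64_t symbol = 100000;	/* 100% in 0.001% steps */
	const uint64_t TU = 64;
	uint64_t ratio, value;

	ratio = (uint64_t)datarate * symbol / ((uint64_t)link_nr * link_bw);
	value = (symbol - ratio) * TU * ratio / symbol / symbol;
	return (uint32_t)(value + 5) | 0x08000000;
}

int main(void)
{
	/* 1080p60, 24bpp: datarate = 148500 * 24 / 8 = 445500 */
	printf("0x%08x\n", dp_tu_value(445500, 4, 270000));	/* 0x08000014 */
	return 0;
}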
-
-static void
-nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct drm_device *dev = encoder->dev;
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct drm_encoder *partner;
-	int or = nv_encoder->or;
-	u32 dpms_ctrl;
-
-	nv_encoder->last_dpms = mode;
-
-	list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
-		struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
-
-		if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
-			continue;
-
-		if (nv_partner != nv_encoder &&
-		    nv_partner->dcb->or == nv_encoder->dcb->or) {
-			if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
-				return;
-			break;
-		}
-	}
-
-	dpms_ctrl  = (mode == DRM_MODE_DPMS_ON);
-	dpms_ctrl |= 0x80000000;
-
-	nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
-	nv_mask(device, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
-	nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
-	nv_wait(device, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
-
-	if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
-		struct dp_train_func func = {
-			.link_set = nvd0_sor_dp_link_set,
-			.train_set = nvd0_sor_dp_train_set,
-			.train_adj = nvd0_sor_dp_train_adj
-		};
-
-		nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
-	}
-}
-
-static bool
-nvd0_sor_mode_fixup(struct drm_encoder *encoder,
-		    const struct drm_display_mode *mode,
-		    struct drm_display_mode *adjusted_mode)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_connector *nv_connector;
-
-	nv_connector = nouveau_encoder_connector_get(nv_encoder);
-	if (nv_connector && nv_connector->native_mode) {
-		if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
-			int id = adjusted_mode->base.id;
-			*adjusted_mode = *nv_connector->native_mode;
-			adjusted_mode->base.id = id;
-		}
-	}
-
-	return true;
-}
-
-static void
-nvd0_sor_disconnect(struct drm_encoder *encoder)
-{
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct drm_device *dev = encoder->dev;
-	u32 *push;
-
-	if (nv_encoder->crtc) {
-		nvd0_crtc_prepare(nv_encoder->crtc);
-
-		push = evo_wait(dev, EVO_MASTER, 4);
-		if (push) {
-			evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
-			evo_data(push, 0x00000000);
-			evo_mthd(push, 0x0080, 1);
-			evo_data(push, 0x00000000);
-			evo_kick(push, dev, EVO_MASTER);
-		}
-
-		nvd0_hdmi_disconnect(encoder);
-
-		nv_encoder->crtc = NULL;
-		nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-	}
-}
-
-static void
-nvd0_sor_prepare(struct drm_encoder *encoder)
-{
-	nvd0_sor_disconnect(encoder);
-	if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
-		evo_sync(encoder->dev, EVO_MASTER);
-}
-
-static void
-nvd0_sor_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
-		  struct drm_display_mode *mode)
-{
-	struct drm_device *dev = encoder->dev;
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
-	struct nouveau_connector *nv_connector;
-	struct nvbios *bios = &drm->vbios;
-	u32 mode_ctrl = (1 << nv_crtc->index);
-	u32 syncs, magic, *push;
-	u32 or_config;
-
-	syncs = 0x00000001;
-	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-		syncs |= 0x00000008;
-	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-		syncs |= 0x00000010;
-
-	magic = 0x31ec6000 | (nv_crtc->index << 25);
-	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-		magic |= 0x00000001;
-
-	nv_connector = nouveau_encoder_connector_get(nv_encoder);
-	switch (nv_encoder->dcb->type) {
-	case DCB_OUTPUT_TMDS:
-		if (nv_encoder->dcb->sorconf.link & 1) {
-			if (mode->clock < 165000)
-				mode_ctrl |= 0x00000100;
-			else
-				mode_ctrl |= 0x00000500;
-		} else {
-			mode_ctrl |= 0x00000200;
-		}
-
-		or_config = (mode_ctrl & 0x00000f00) >> 8;
-		if (mode->clock >= 165000)
-			or_config |= 0x0100;
-
-		nvd0_hdmi_mode_set(encoder, mode);
-		break;
-	case DCB_OUTPUT_LVDS:
-		or_config = (mode_ctrl & 0x00000f00) >> 8;
-		if (bios->fp_no_ddc) {
-			if (bios->fp.dual_link)
-				or_config |= 0x0100;
-			if (bios->fp.if_is_24bit)
-				or_config |= 0x0200;
-		} else {
-			if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
-				if (((u8 *)nv_connector->edid)[121] == 2)
-					or_config |= 0x0100;
-			} else
-			if (mode->clock >= bios->fp.duallink_transition_clk) {
-				or_config |= 0x0100;
-			}
-
-			if (or_config & 0x0100) {
-				if (bios->fp.strapless_is_24bit & 2)
-					or_config |= 0x0200;
-			} else {
-				if (bios->fp.strapless_is_24bit & 1)
-					or_config |= 0x0200;
-			}
-
-			if (nv_connector->base.display_info.bpc == 8)
-				or_config |= 0x0200;
-
-		}
-		break;
-	case DCB_OUTPUT_DP:
-		if (nv_connector->base.display_info.bpc == 6) {
-			nv_encoder->dp.datarate = mode->clock * 18 / 8;
-			syncs |= 0x00000002 << 6;
-		} else {
-			nv_encoder->dp.datarate = mode->clock * 24 / 8;
-			syncs |= 0x00000005 << 6;
-		}
-
-		if (nv_encoder->dcb->sorconf.link & 1)
-			mode_ctrl |= 0x00000800;
-		else
-			mode_ctrl |= 0x00000900;
-
-		or_config = (mode_ctrl & 0x00000f00) >> 8;
-		break;
-	default:
-		BUG_ON(1);
-		break;
-	}
-
-	nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
-
-	if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
-		nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index,
-					 nv_encoder->dp.datarate);
-	}
-
-	push = evo_wait(dev, EVO_MASTER, 8);
-	if (push) {
-		evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
-		evo_data(push, syncs);
-		evo_data(push, magic);
-		evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 2);
-		evo_data(push, mode_ctrl);
-		evo_data(push, or_config);
-		evo_kick(push, dev, EVO_MASTER);
-	}
-
-	nv_encoder->crtc = encoder->crtc;
-}
-
-static void
-nvd0_sor_destroy(struct drm_encoder *encoder)
-{
-	drm_encoder_cleanup(encoder);
-	kfree(encoder);
-}
-
-static const struct drm_encoder_helper_funcs nvd0_sor_hfunc = {
-	.dpms = nvd0_sor_dpms,
-	.mode_fixup = nvd0_sor_mode_fixup,
-	.prepare = nvd0_sor_prepare,
-	.commit = nvd0_sor_commit,
-	.mode_set = nvd0_sor_mode_set,
-	.disable = nvd0_sor_disconnect,
-	.get_crtc = nvd0_display_crtc_get,
-};
-
-static const struct drm_encoder_funcs nvd0_sor_func = {
-	.destroy = nvd0_sor_destroy,
-};
-
-static int
-nvd0_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
-{
-	struct drm_device *dev = connector->dev;
-	struct nouveau_encoder *nv_encoder;
-	struct drm_encoder *encoder;
-
-	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
-	if (!nv_encoder)
-		return -ENOMEM;
-	nv_encoder->dcb = dcbe;
-	nv_encoder->or = ffs(dcbe->or) - 1;
-	nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-
-	encoder = to_drm_encoder(nv_encoder);
-	encoder->possible_crtcs = dcbe->heads;
-	encoder->possible_clones = 0;
-	drm_encoder_init(dev, encoder, &nvd0_sor_func, DRM_MODE_ENCODER_TMDS);
-	drm_encoder_helper_add(encoder, &nvd0_sor_hfunc);
-
-	drm_mode_connector_attach_encoder(connector, encoder);
-	return 0;
-}
-
-/******************************************************************************
- * IRQ
- *****************************************************************************/
-static struct dcb_output *
-lookup_dcb(struct drm_device *dev, int id, u32 mc)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	int type, or, i, link = -1;
-
-	if (id < 4) {
-		type = DCB_OUTPUT_ANALOG;
-		or   = id;
-	} else {
-		switch (mc & 0x00000f00) {
-		case 0x00000000: link = 0; type = DCB_OUTPUT_LVDS; break;
-		case 0x00000100: link = 0; type = DCB_OUTPUT_TMDS; break;
-		case 0x00000200: link = 1; type = DCB_OUTPUT_TMDS; break;
-		case 0x00000500: link = 0; type = DCB_OUTPUT_TMDS; break;
-		case 0x00000800: link = 0; type = DCB_OUTPUT_DP; break;
-		case 0x00000900: link = 1; type = DCB_OUTPUT_DP; break;
-		default:
-			NV_ERROR(drm, "PDISP: unknown SOR mc 0x%08x\n", mc);
-			return NULL;
-		}
-
-		or = id - 4;
-	}
-
-	for (i = 0; i < drm->vbios.dcb.entries; i++) {
-		struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
-		if (dcb->type == type && (dcb->or & (1 << or)) &&
-		    (link < 0 || link == !(dcb->sorconf.link & 1)))
-			return dcb;
-	}
-
-	NV_ERROR(drm, "PDISP: DCB for %d/0x%08x not found\n", id, mc);
-	return NULL;
-}
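
The mc nibble decoded here (bits 11:8) is the same SOR protocol value nvd0_sor_mode_set() programs into mode_ctrl: 0x1/0x2 are single-link TMDS on link A/B, 0x5 is evidently the dual-link TMDS case selected there for pixel clocks at or above 165 MHz, and 0x8/0x9 are DP on link A/B. Decode sketch:

#include <stdint.h>
#include <stdio.h>

/* SOR protocol nibble (mc bits 11:8) as decoded by lookup_dcb(). */
static const char *sor_proto(uint32_t mc)
{
	switch (mc & 0x00000f00) {
	case 0x00000000: return "LVDS, link A";
	case 0x00000100: return "TMDS single-link, link A";
	case 0x00000200: return "TMDS single-link, link B";
	case 0x00000500: return "TMDS dual-link";
	case 0x00000800: return "DP, link A";
	case 0x00000900: return "DP, link B";
	default:         return "unknown";
	}
}

int main(void)
{
	printf("%s\n", sor_proto(0x00000500));
	return 0;
}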
-
-static void
-nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct dcb_output *dcb;
-	int i;
-
-	for (i = 0; mask && i < 8; i++) {
-		u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
-		if (!(mcc & (1 << crtc)))
-			continue;
-
-		dcb = lookup_dcb(dev, i, mcc);
-		if (!dcb)
-			continue;
-
-		nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc);
-	}
-
-	nv_wr32(device, 0x6101d4, 0x00000000);
-	nv_wr32(device, 0x6109d4, 0x00000000);
-	nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct dcb_output *dcb;
-	u32 or, tmp, pclk;
-	int i;
-
-	for (i = 0; mask && i < 8; i++) {
-		u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
-		if (!(mcc & (1 << crtc)))
-			continue;
-
-		dcb = lookup_dcb(dev, i, mcc);
-		if (!dcb)
-			continue;
-
-		nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc);
-	}
-
-	pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
-	NV_DEBUG(drm, "PDISP: crtc %d pclk %d mask 0x%08x\n",
-			  crtc, pclk, mask);
-	if (pclk && (mask & 0x00010000)) {
-		nv50_crtc_set_clock(dev, crtc, pclk);
-	}
-
-	for (i = 0; mask && i < 8; i++) {
-		u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
-		u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
-		if (!(mcp & (1 << crtc)))
-			continue;
-
-		dcb = lookup_dcb(dev, i, mcp);
-		if (!dcb)
-			continue;
-		or = ffs(dcb->or) - 1;
-
-		nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc);
-
-		nv_wr32(device, 0x612200 + (crtc * 0x800), 0x00000000);
-		switch (dcb->type) {
-		case DCB_OUTPUT_ANALOG:
-			nv_wr32(device, 0x612280 + (or * 0x800), 0x00000000);
-			break;
-		case DCB_OUTPUT_TMDS:
-		case DCB_OUTPUT_LVDS:
-		case DCB_OUTPUT_DP:
-			if (cfg & 0x00000100)
-				tmp = 0x00000101;
-			else
-				tmp = 0x00000000;
-
-			nv_mask(device, 0x612300 + (or * 0x800), 0x00000707, tmp);
-			break;
-		default:
-			break;
-		}
-
-		break;
-	}
-
-	nv_wr32(device, 0x6101d4, 0x00000000);
-	nv_wr32(device, 0x6109d4, 0x00000000);
-	nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct dcb_output *dcb;
-	int pclk, i;
-
-	pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
-
-	for (i = 0; mask && i < 8; i++) {
-		u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
-		u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
-		if (!(mcp & (1 << crtc)))
-			continue;
-
-		dcb = lookup_dcb(dev, i, mcp);
-		if (!dcb)
-			continue;
-
-		nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc);
-	}
-
-	nv_wr32(device, 0x6101d4, 0x00000000);
-	nv_wr32(device, 0x6109d4, 0x00000000);
-	nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_bh(unsigned long data)
-{
-	struct drm_device *dev = (struct drm_device *)data;
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvd0_display *disp = nvd0_display(dev);
-	u32 mask = 0, crtc = ~0;
-	int i;
-
-	if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) {
-		NV_INFO(drm, "PDISP: modeset req %d\n", disp->modeset);
-		NV_INFO(drm, " STAT: 0x%08x 0x%08x 0x%08x\n",
-			 nv_rd32(device, 0x6101d0),
-			 nv_rd32(device, 0x6101d4), nv_rd32(device, 0x6109d4));
-		for (i = 0; i < 8; i++) {
-			NV_INFO(drm, " %s%d: 0x%08x 0x%08x\n",
-				i < 4 ? "DAC" : "SOR", i,
-				nv_rd32(device, 0x640180 + (i * 0x20)),
-				nv_rd32(device, 0x660180 + (i * 0x20)));
-		}
-	}
-
-	while (!mask && ++crtc < dev->mode_config.num_crtc)
-		mask = nv_rd32(device, 0x6101d4 + (crtc * 0x800));
-
-	if (disp->modeset & 0x00000001)
-		nvd0_display_unk1_handler(dev, crtc, mask);
-	if (disp->modeset & 0x00000002)
-		nvd0_display_unk2_handler(dev, crtc, mask);
-	if (disp->modeset & 0x00000004)
-		nvd0_display_unk4_handler(dev, crtc, mask);
-}
-
-void
-nvd0_display_intr(struct drm_device *dev)
-{
-	struct nvd0_display *disp = nvd0_display(dev);
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	u32 intr = nv_rd32(device, 0x610088);
-
-	if (intr & 0x00000001) {
-		u32 stat = nv_rd32(device, 0x61008c);
-		nv_wr32(device, 0x61008c, stat);
-		intr &= ~0x00000001;
-	}
-
-	if (intr & 0x00000002) {
-		u32 stat = nv_rd32(device, 0x61009c);
-		int chid = ffs(stat) - 1;
-		if (chid >= 0) {
-			u32 mthd = nv_rd32(device, 0x6101f0 + (chid * 12));
-			u32 data = nv_rd32(device, 0x6101f4 + (chid * 12));
-			u32 unkn = nv_rd32(device, 0x6101f8 + (chid * 12));
-
-			NV_INFO(drm, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
-				     "0x%08x 0x%08x\n",
-				chid, (mthd & 0x0000ffc), data, mthd, unkn);
-			nv_wr32(device, 0x61009c, (1 << chid));
-			nv_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
-		}
-
-		intr &= ~0x00000002;
-	}
-
-	if (intr & 0x00100000) {
-		u32 stat = nv_rd32(device, 0x6100ac);
-
-		if (stat & 0x00000007) {
-			disp->modeset = stat;
-			tasklet_schedule(&disp->tasklet);
-
-			nv_wr32(device, 0x6100ac, (stat & 0x00000007));
-			stat &= ~0x00000007;
-		}
-
-		if (stat) {
-			NV_INFO(drm, "PDISP: unknown intr24 0x%08x\n", stat);
-			nv_wr32(device, 0x6100ac, stat);
-		}
-
-		intr &= ~0x00100000;
-	}
-
-	intr &= ~0x0f000000; /* vblank, handled in core */
-	if (intr)
-		NV_INFO(drm, "PDISP: unknown intr 0x%08x\n", intr);
-}
-
-/******************************************************************************
- * Init
- *****************************************************************************/
-void
-nvd0_display_fini(struct drm_device *dev)
-{
-	int i;
-
-	/* fini cursors + overlays + flips */
-	for (i = 1; i >= 0; i--) {
-		evo_fini_pio(dev, EVO_CURS(i));
-		evo_fini_pio(dev, EVO_OIMM(i));
-		evo_fini_dma(dev, EVO_OVLY(i));
-		evo_fini_dma(dev, EVO_FLIP(i));
-	}
-
-	/* fini master */
-	evo_fini_dma(dev, EVO_MASTER);
-}
-
-int
-nvd0_display_init(struct drm_device *dev)
-{
-	struct nvd0_display *disp = nvd0_display(dev);
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	int ret, i;
-	u32 *push;
-
-	if (nv_rd32(device, 0x6100ac) & 0x00000100) {
-		nv_wr32(device, 0x6100ac, 0x00000100);
-		nv_mask(device, 0x6194e8, 0x00000001, 0x00000000);
-		if (!nv_wait(device, 0x6194e8, 0x00000002, 0x00000000)) {
-			NV_ERROR(drm, "PDISP: 0x6194e8 0x%08x\n",
-				 nv_rd32(device, 0x6194e8));
-			return -EBUSY;
-		}
-	}
-
-	/* nfi what these are exactly, i do know that SOR_MODE_CTRL won't
-	 * work at all unless you do the SOR part below.
-	 */
-	for (i = 0; i < 3; i++) {
-		u32 dac = nv_rd32(device, 0x61a000 + (i * 0x800));
-		nv_wr32(device, 0x6101c0 + (i * 0x800), dac);
-	}
-
-	for (i = 0; i < 4; i++) {
-		u32 sor = nv_rd32(device, 0x61c000 + (i * 0x800));
-		nv_wr32(device, 0x6301c4 + (i * 0x800), sor);
-	}
-
-	for (i = 0; i < dev->mode_config.num_crtc; i++) {
-		u32 crtc0 = nv_rd32(device, 0x616104 + (i * 0x800));
-		u32 crtc1 = nv_rd32(device, 0x616108 + (i * 0x800));
-		u32 crtc2 = nv_rd32(device, 0x61610c + (i * 0x800));
-		nv_wr32(device, 0x6101b4 + (i * 0x800), crtc0);
-		nv_wr32(device, 0x6101b8 + (i * 0x800), crtc1);
-		nv_wr32(device, 0x6101bc + (i * 0x800), crtc2);
-	}
-
-	/* point at our hash table / objects, enable interrupts */
-	nv_wr32(device, 0x610010, (disp->mem->addr >> 8) | 9);
-	nv_mask(device, 0x6100b0, 0x00000307, 0x00000307);
-
-	/* init master */
-	ret = evo_init_dma(dev, EVO_MASTER);
-	if (ret)
-		goto error;
-
-	/* init flips + overlays + cursors */
-	for (i = 0; i < dev->mode_config.num_crtc; i++) {
-		if ((ret = evo_init_dma(dev, EVO_FLIP(i))) ||
-		    (ret = evo_init_dma(dev, EVO_OVLY(i))) ||
-		    (ret = evo_init_pio(dev, EVO_OIMM(i))) ||
-		    (ret = evo_init_pio(dev, EVO_CURS(i))))
-			goto error;
-	}
-
-	push = evo_wait(dev, EVO_MASTER, 32);
-	if (!push) {
-		ret = -EBUSY;
-		goto error;
-	}
-	evo_mthd(push, 0x0088, 1);
-	evo_data(push, NvEvoSync);
-	evo_mthd(push, 0x0084, 1);
-	evo_data(push, 0x00000000);
-	evo_mthd(push, 0x0084, 1);
-	evo_data(push, 0x80000000);
-	evo_mthd(push, 0x008c, 1);
-	evo_data(push, 0x00000000);
-	evo_kick(push, dev, EVO_MASTER);
-
-error:
-	if (ret)
-		nvd0_display_fini(dev);
-	return ret;
-}
-
-void
-nvd0_display_destroy(struct drm_device *dev)
-{
-	struct nvd0_display *disp = nvd0_display(dev);
-	struct pci_dev *pdev = dev->pdev;
-	int i;
-
-	for (i = 0; i < EVO_DMA_NR; i++) {
-		struct evo *evo = &disp->evo[i];
-		pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle);
-	}
-
-	nouveau_gpuobj_ref(NULL, &disp->mem);
-	nouveau_bo_unmap(disp->sync);
-	nouveau_bo_ref(NULL, &disp->sync);
-
-	nouveau_display(dev)->priv = NULL;
-	kfree(disp);
-}
-
-int
-nvd0_display_create(struct drm_device *dev)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_bar *bar = nouveau_bar(device);
-	struct nouveau_fb *pfb = nouveau_fb(device);
-	struct dcb_table *dcb = &drm->vbios.dcb;
-	struct drm_connector *connector, *tmp;
-	struct pci_dev *pdev = dev->pdev;
-	struct nvd0_display *disp;
-	struct dcb_output *dcbe;
-	int crtcs, ret, i;
-
-	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
-	if (!disp)
-		return -ENOMEM;
-
-	nouveau_display(dev)->priv = disp;
-	nouveau_display(dev)->dtor = nvd0_display_destroy;
-	nouveau_display(dev)->init = nvd0_display_init;
-	nouveau_display(dev)->fini = nvd0_display_fini;
-
-	/* create crtc objects to represent the hw heads */
-	crtcs = nv_rd32(device, 0x022448);
-	for (i = 0; i < crtcs; i++) {
-		ret = nvd0_crtc_create(dev, i);
-		if (ret)
-			goto out;
-	}
-
-	/* create encoder/connector objects based on VBIOS DCB table */
-	for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
-		connector = nouveau_connector_create(dev, dcbe->connector);
-		if (IS_ERR(connector))
-			continue;
-
-		if (dcbe->location != DCB_LOC_ON_CHIP) {
-			NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
-				dcbe->type, ffs(dcbe->or) - 1);
-			continue;
-		}
-
-		switch (dcbe->type) {
-		case DCB_OUTPUT_TMDS:
-		case DCB_OUTPUT_LVDS:
-		case DCB_OUTPUT_DP:
-			nvd0_sor_create(connector, dcbe);
-			break;
-		case DCB_OUTPUT_ANALOG:
-			nvd0_dac_create(connector, dcbe);
-			break;
-		default:
-			NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
-				dcbe->type, ffs(dcbe->or) - 1);
-			continue;
-		}
-	}
-
-	/* cull any connectors we created that don't have an encoder */
-	list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
-		if (connector->encoder_ids[0])
-			continue;
-
-		NV_WARN(drm, "%s has no encoders, removing\n",
-			drm_get_connector_name(connector));
-		connector->funcs->destroy(connector);
-	}
-
-	/* setup interrupt handling */
-	tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
-
-	/* small shared memory area we use for notifiers and semaphores */
-	ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, NULL, &disp->sync);
-	if (!ret) {
-		ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
-		if (!ret)
-			ret = nouveau_bo_map(disp->sync);
-		if (ret)
-			nouveau_bo_ref(NULL, &disp->sync);
-	}
-
-	if (ret)
-		goto out;
-
-	/* hash table and dma objects for the memory areas we care about */
-	ret = nouveau_gpuobj_new(nv_object(device), NULL, 0x4000, 0x10000,
-				 NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
-	if (ret)
-		goto out;
-
-	/* create evo dma channels */
-	for (i = 0; i < EVO_DMA_NR; i++) {
-		struct evo *evo = &disp->evo[i];
-		u64 offset = disp->sync->bo.offset;
-		u32 dmao = 0x1000 + (i * 0x100);
-		u32 hash = 0x0000 + (i * 0x040);
-
-		evo->idx = i;
-		evo->sem.offset = EVO_SYNC(evo->idx, 0x00);
-		evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle);
-		if (!evo->ptr) {
-			ret = -ENOMEM;
-			goto out;
-		}
-
-		nv_wo32(disp->mem, dmao + 0x00, 0x00000049);
-		nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8);
-		nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8);
-		nv_wo32(disp->mem, dmao + 0x0c, 0x00000000);
-		nv_wo32(disp->mem, dmao + 0x10, 0x00000000);
-		nv_wo32(disp->mem, dmao + 0x14, 0x00000000);
-		nv_wo32(disp->mem, hash + 0x00, NvEvoSync);
-		nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) |
-						((dmao + 0x00) << 9));
-
-		nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
-		nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
-		nv_wo32(disp->mem, dmao + 0x28, (pfb->ram.size - 1) >> 8);
-		nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
-		nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
-		nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
-		nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM);
-		nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) |
-						((dmao + 0x20) << 9));
-
-		nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
-		nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
-		nv_wo32(disp->mem, dmao + 0x48, (pfb->ram.size - 1) >> 8);
-		nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
-		nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
-		nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
-		nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP);
-		nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) |
-						((dmao + 0x40) << 9));
-
-		nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
-		nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
-		nv_wo32(disp->mem, dmao + 0x68, (pfb->ram.size - 1) >> 8);
-		nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
-		nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
-		nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
-		nv_wo32(disp->mem, hash + 0x18, NvEvoFB32);
-		nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) |
-						((dmao + 0x60) << 9));
-	}
-
-	bar->flush(bar);
-
-out:
-	if (ret)
-		nvd0_display_destroy(dev);
-	return ret;
-}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 24d932f53203..9175615bbd8a 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -561,6 +561,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 		/* use frac fb div on APUs */
 		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
 			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+		if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
+			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 	} else {
 		radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
 
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index d5699fe4f1e8..064023bed480 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -34,8 +34,7 @@
 
 /* move these to drm_dp_helper.c/h */
 #define DP_LINK_CONFIGURATION_SIZE 9
-#define DP_LINK_STATUS_SIZE	   6
-#define DP_DPCD_SIZE	           8
+#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
 
 static char *voltage_names[] = {
         "0.4V", "0.6V", "0.8V", "1.2V"
@@ -290,78 +289,6 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 
 /***** general DP utility functions *****/
 
-static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
-{
-	return link_status[r - DP_LANE0_1_STATUS];
-}
-
-static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
-			     int lane)
-{
-	int i = DP_LANE0_1_STATUS + (lane >> 1);
-	int s = (lane & 1) * 4;
-	u8 l = dp_link_status(link_status, i);
-	return (l >> s) & 0xf;
-}
-
-static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
-				 int lane_count)
-{
-	int lane;
-	u8 lane_status;
-
-	for (lane = 0; lane < lane_count; lane++) {
-		lane_status = dp_get_lane_status(link_status, lane);
-		if ((lane_status & DP_LANE_CR_DONE) == 0)
-			return false;
-	}
-	return true;
-}
-
-static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
-			     int lane_count)
-{
-	u8 lane_align;
-	u8 lane_status;
-	int lane;
-
-	lane_align = dp_link_status(link_status,
-				    DP_LANE_ALIGN_STATUS_UPDATED);
-	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
-		return false;
-	for (lane = 0; lane < lane_count; lane++) {
-		lane_status = dp_get_lane_status(link_status, lane);
-		if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
-			return false;
-	}
-	return true;
-}
-
-static u8 dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
-					int lane)
-
-{
-	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
-	int s = ((lane & 1) ?
-		 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
-		 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
-	u8 l = dp_link_status(link_status, i);
-
-	return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
-}
-
-static u8 dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
-					     int lane)
-{
-	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
-	int s = ((lane & 1) ?
-		 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
-		 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
-	u8 l = dp_link_status(link_status, i);
-
-	return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
-}
-
 #define DP_VOLTAGE_MAX         DP_TRAIN_VOLTAGE_SWING_1200
 #define DP_PRE_EMPHASIS_MAX    DP_TRAIN_PRE_EMPHASIS_9_5
 
@@ -374,8 +301,8 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
 	int lane;
 
 	for (lane = 0; lane < lane_count; lane++) {
-		u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
-		u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
+		u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+		u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
 
 		DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
 			  lane,
@@ -420,37 +347,6 @@ static int dp_get_max_dp_pix_clock(int link_rate,
 	return (link_rate * lane_num * 8) / bpp;
 }
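
The helper above caps the pixel clock at link_rate * lane_num * 8 / bpp: link_rate is the per-lane symbol rate in kHz, and after 8b/10b each symbol carries one data byte, so the product is the aggregate payload bit rate. Worked example: 4 lanes at 270000 with 24 bpp allow a 360000 kHz pixel clock.

#include <stdio.h>

/* Arithmetic of dp_get_max_dp_pix_clock(): aggregate payload bits
 * per second divided by bits per pixel yields the pixel clock (kHz).
 */
static int max_dp_pix_clock(int link_rate, int lane_num, int bpp)
{
	return (link_rate * lane_num * 8) / bpp;
}

int main(void)
{
	printf("%d kHz\n", max_dp_pix_clock(270000, 4, 24));	/* 360000 */
	return 0;
}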
 
-static int dp_get_max_link_rate(u8 dpcd[DP_DPCD_SIZE])
-{
-	switch (dpcd[DP_MAX_LINK_RATE]) {
-	case DP_LINK_BW_1_62:
-	default:
-		return 162000;
-	case DP_LINK_BW_2_7:
-		return 270000;
-	case DP_LINK_BW_5_4:
-		return 540000;
-	}
-}
-
-static u8 dp_get_max_lane_number(u8 dpcd[DP_DPCD_SIZE])
-{
-	return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
-}
-
-static u8 dp_get_dp_link_rate_coded(int link_rate)
-{
-	switch (link_rate) {
-	case 162000:
-	default:
-		return DP_LINK_BW_1_62;
-	case 270000:
-		return DP_LINK_BW_2_7;
-	case 540000:
-		return DP_LINK_BW_5_4;
-	}
-}
-
 /***** radeon specific DP functions *****/
 
 /* First get the min lane# when low rate is used according to pixel clock
@@ -462,8 +358,8 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
 					int pix_clock)
 {
 	int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
-	int max_link_rate = dp_get_max_link_rate(dpcd);
-	int max_lane_num = dp_get_max_lane_number(dpcd);
+	int max_link_rate = drm_dp_max_link_rate(dpcd);
+	int max_lane_num = drm_dp_max_lane_count(dpcd);
 	int lane_num;
 	int max_dp_pix_clock;
 
@@ -500,7 +396,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
 			return 540000;
 	}
 
-	return dp_get_max_link_rate(dpcd);
+	return drm_dp_max_link_rate(dpcd);
 }
 
 static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
@@ -551,14 +447,15 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
 bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
 {
 	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
-	u8 msg[25];
+	u8 msg[DP_DPCD_SIZE];
 	int ret, i;
 
-	ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0);
+	ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg,
+					DP_DPCD_SIZE, 0);
 	if (ret > 0) {
-		memcpy(dig_connector->dpcd, msg, 8);
+		memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
 		DRM_DEBUG_KMS("DPCD: ");
-		for (i = 0; i < 8; i++)
+		for (i = 0; i < DP_DPCD_SIZE; i++)
 			DRM_DEBUG_KMS("%02x ", msg[i]);
 		DRM_DEBUG_KMS("\n");
 
@@ -664,7 +561,7 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
 
 	if (!radeon_dp_get_link_status(radeon_connector, link_status))
 		return false;
-	if (dp_channel_eq_ok(link_status, dig->dp_lane_count))
+	if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
 		return false;
 	return true;
 }
@@ -677,9 +574,8 @@ struct radeon_dp_link_train_info {
 	int enc_id;
 	int dp_clock;
 	int dp_lane_count;
-	int rd_interval;
 	bool tp3_supported;
-	u8 dpcd[8];
+	u8 dpcd[DP_RECEIVER_CAP_SIZE];
 	u8 train_set[4];
 	u8 link_status[DP_LINK_STATUS_SIZE];
 	u8 tries;
@@ -765,7 +661,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
 	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
 
 	/* set the link rate on the sink */
-	tmp = dp_get_dp_link_rate_coded(dp_info->dp_clock);
+	tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
 	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
 
 	/* start training on the source */
@@ -821,17 +717,14 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
 	dp_info->tries = 0;
 	voltage = 0xff;
 	while (1) {
-		if (dp_info->rd_interval == 0)
-			udelay(100);
-		else
-			mdelay(dp_info->rd_interval * 4);
+		drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
 
 		if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
 			DRM_ERROR("displayport link status failed\n");
 			break;
 		}
 
-		if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+		if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
 			clock_recovery = true;
 			break;
 		}
@@ -886,17 +779,14 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
 	dp_info->tries = 0;
 	channel_eq = false;
 	while (1) {
-		if (dp_info->rd_interval == 0)
-			udelay(400);
-		else
-			mdelay(dp_info->rd_interval * 4);
+		drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
 
 		if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
 			DRM_ERROR("displayport link status failed\n");
 			break;
 		}
 
-		if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+		if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
 			channel_eq = true;
 			break;
 		}
@@ -974,14 +864,13 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
 	else
 		dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
 
-	dp_info.rd_interval = radeon_read_dpcd_reg(radeon_connector, DP_TRAINING_AUX_RD_INTERVAL);
 	tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
 	if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
 		dp_info.tp3_supported = true;
 	else
 		dp_info.tp3_supported = false;
 
-	memcpy(dp_info.dpcd, dig_connector->dpcd, 8);
+	memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
 	dp_info.rdev = rdev;
 	dp_info.encoder = encoder;
 	dp_info.connector = connector;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 010bae19554a..4552d4aff317 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -340,7 +340,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
 	    ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
 	     (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
 		struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-		radeon_dp_set_link_config(connector, mode);
+		radeon_dp_set_link_config(connector, adjusted_mode);
 	}
 
 	return true;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 5d1d21a6dcdd..f95d7fc1f5e0 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1821,7 +1821,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 	case CHIP_SUMO:
 		rdev->config.evergreen.num_ses = 1;
 		rdev->config.evergreen.max_pipes = 4;
-		rdev->config.evergreen.max_tile_pipes = 2;
+		rdev->config.evergreen.max_tile_pipes = 4;
 		if (rdev->pdev->device == 0x9648)
 			rdev->config.evergreen.max_simds = 3;
 		else if ((rdev->pdev->device == 0x9647) ||
@@ -1844,7 +1844,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
-		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+		gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_SUMO2:
 		rdev->config.evergreen.num_ses = 1;
@@ -1866,7 +1866,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
-		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+		gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_BARTS:
 		rdev->config.evergreen.num_ses = 2;
@@ -1914,7 +1914,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		break;
 	case CHIP_CAICOS:
 		rdev->config.evergreen.num_ses = 1;
-		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_pipes = 2;
 		rdev->config.evergreen.max_tile_pipes = 2;
 		rdev->config.evergreen.max_simds = 2;
 		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
@@ -2034,6 +2034,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 	WREG32(GB_ADDR_CONFIG, gb_addr_config);
 	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG, gb_addr_config);
 
 	tmp = gb_addr_config & NUM_PIPES_MASK;
 	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
@@ -2403,8 +2404,12 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
 					 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
 		cayman_cp_int_cntl_setup(rdev, 1, 0);
 		cayman_cp_int_cntl_setup(rdev, 2, 0);
+		tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+		WREG32(CAYMAN_DMA1_CNTL, tmp);
 	} else
 		WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+	WREG32(DMA_CNTL, tmp);
 	WREG32(GRBM_INT_CNTL, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2457,6 +2462,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	u32 grbm_int_cntl = 0;
 	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
 	u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
+	u32 dma_cntl, dma_cntl1 = 0;
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2484,6 +2490,8 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
 	afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
 
+	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+
 	if (rdev->family >= CHIP_CAYMAN) {
 		/* enable CP interrupts on all rings */
 		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
@@ -2506,6 +2514,19 @@ int evergreen_irq_set(struct radeon_device *rdev)
 		}
 	}
 
+	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+		DRM_DEBUG("r600_irq_set: sw int dma\n");
+		dma_cntl |= TRAP_ENABLE;
+	}
+
+	if (rdev->family >= CHIP_CAYMAN) {
+		dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+			DRM_DEBUG("r600_irq_set: sw int dma1\n");
+			dma_cntl1 |= TRAP_ENABLE;
+		}
+	}
+
 	if (rdev->irq.crtc_vblank_int[0] ||
 	    atomic_read(&rdev->irq.pflip[0])) {
 		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2591,6 +2612,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
 		cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
 	} else
 		WREG32(CP_INT_CNTL, cp_int_cntl);
+
+	WREG32(DMA_CNTL, dma_cntl);
+
+	if (rdev->family >= CHIP_CAYMAN)
+		WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
+
 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3093,6 +3120,16 @@ restart_ih:
 				break;
 			}
 			break;
+		case 146:
+		case 147:
+			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+				RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+				RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+			/* reset addr and status */
+			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+			break;
 		case 176: /* CP_INT in ring buffer */
 		case 177: /* CP_INT in IB1 */
 		case 178: /* CP_INT in IB2 */
@@ -3116,9 +3153,19 @@ restart_ih:
 			} else
 				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 			break;
+		case 224: /* DMA trap event */
+			DRM_DEBUG("IH: DMA trap\n");
+			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+			break;
 		case 233: /* GUI IDLE */
 			DRM_DEBUG("IH: GUI idle\n");
 			break;
+		case 244: /* DMA trap event */
+			if (rdev->family >= CHIP_CAYMAN) {
+				DRM_DEBUG("IH: DMA1 trap\n");
+				radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+			}
+			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 			break;
@@ -3144,6 +3191,143 @@ restart_ih:
 	return IRQ_HANDLED;
 }
 
+/**
+ * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (evergreen-SI).
+ */
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+				   struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+	/* write the fence */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+	radeon_ring_write(ring, fence->seq);
+	/* generate an interrupt */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+	/* flush HDP */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | HDP_MEM_COHERENCY_FLUSH_CNTL);
+	radeon_ring_write(ring, 1);
+}
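
The fence write above targets a 40-bit GPU address split into a dword-aligned low word and an 8-bit high word, followed by the sequence number, a trap packet to raise the interrupt, and an SRBM write that flushes HDP. A sketch of just the address split (address value illustrative):

#include <stdint.h>
#include <stdio.h>

/* 40-bit DMA address split used by evergreen_dma_fence_ring_emit():
 * low word dword-aligned, high word limited to 8 bits.
 */
static void dma_addr_split(uint64_t addr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)addr & 0xfffffffc;
	*hi = (uint32_t)(addr >> 32) & 0xff;
}

int main(void)
{
	uint32_t lo, hi;

	dma_addr_split(0x00ab12345678ull, &lo, &hi);	/* illustrative */
	printf("lo 0x%08x hi 0x%02x\n", lo, hi);	/* 0x12345678, 0xab */
	return 0;
}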
+
+/**
+ * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (evergreen).
+ */
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+				   struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+	if (rdev->wb.enabled) {
+		u32 next_rptr = ring->wptr + 4;
+		while ((next_rptr & 7) != 5)
+			next_rptr++;
+		next_rptr += 3;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+	 * Pad as necessary with NOPs.
+	 */
+	while ((ring->wptr & 7) != 5)
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
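
The padding rule above works because the INDIRECT_BUFFER packet is exactly 3 dwords: NOPs are emitted until wptr % 8 == 5, so the packet always ends on an 8-dword boundary (5 + 3 = 8). Sketch of the pad count per starting position:

#include <stdio.h>

/* NOP padding from evergreen_dma_ring_ib_execute(): pad until
 * wptr % 8 == 5 so the 3-dword IB packet ends 8-dword aligned.
 */
static unsigned dma_ib_pad(unsigned wptr)
{
	unsigned pad = 0;

	while (((wptr + pad) & 7) != 5)
		pad++;
	return pad;
}

int main(void)
{
	unsigned w;

	for (w = 0; w < 8; w++)
		printf("wptr %% 8 == %u -> %u NOPs\n", w, dma_ib_pad(w));
	return 0;
}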
+
+/**
+ * evergreen_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (evergreen-cayman).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int evergreen_copy_dma(struct radeon_device *rdev,
+		       uint64_t src_offset, uint64_t dst_offset,
+		       unsigned num_gpu_pages,
+		       struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_dw, cur_size_in_dw;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
+	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
+	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_dw = size_in_dw;
+		if (cur_size_in_dw > 0xFFFFF)
+			cur_size_in_dw = 0xFFFFF;
+		size_in_dw -= cur_size_in_dw;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, src_offset & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		src_offset += cur_size_in_dw * 4;
+		dst_offset += cur_size_in_dw * 4;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
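
The ring reservation of num_loops * 5 + 11 follows from each COPY packet moving at most 0xFFFFF dwords in 5 ring dwords, with the remaining 11 dwords presumably budgeted for the fence, trap and flush emitted afterwards. Sizing example: 1024 GPU pages is 1048576 dwords, one more than a single packet can carry, so two loops are needed.

#include <stdio.h>

#define RADEON_GPU_PAGE_SHIFT	12	/* 4 KiB GPU pages */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Ring-space sizing from evergreen_copy_dma(). */
int main(void)
{
	unsigned pages = 1024;
	unsigned size_in_dw = (pages << RADEON_GPU_PAGE_SHIFT) / 4;
	unsigned num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);

	printf("%u dwords -> %u loops, %u ring dwords reserved\n",
	       size_in_dw, num_loops, num_loops * 5 + 11);
	return 0;
}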
+
 static int evergreen_startup(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -3207,6 +3391,12 @@ static int evergreen_startup(struct radeon_device *rdev)
 		return r;
 	}
 
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
 	/* Enable IRQ */
 	r = r600_irq_init(rdev);
 	if (r) {
@@ -3221,10 +3411,21 @@ static int evergreen_startup(struct radeon_device *rdev)
 			     0, 0xfffff, RADEON_CP_PACKET2);
 	if (r)
 		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_RB_RPTR, DMA_RB_WPTR,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
+
 	r = evergreen_cp_load_microcode(rdev);
 	if (r)
 		return r;
 	r = evergreen_cp_resume(rdev);
+	if (r)
+		return r;
+	r = r600_dma_resume(rdev);
 	if (r)
 		return r;
 
@@ -3273,11 +3474,9 @@ int evergreen_resume(struct radeon_device *rdev)
 
 int evergreen_suspend(struct radeon_device *rdev)
 {
-	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
-
 	r600_audio_fini(rdev);
 	r700_cp_stop(rdev);
-	ring->ready = false;
+	r600_dma_stop(rdev);
 	evergreen_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	evergreen_pcie_gart_disable(rdev);
@@ -3354,6 +3553,9 @@ int evergreen_init(struct radeon_device *rdev)
 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -3366,6 +3568,7 @@ int evergreen_init(struct radeon_device *rdev)
 	if (r) {
 		dev_err(rdev->dev, "disabling GPU acceleration\n");
 		r700_cp_fini(rdev);
+		r600_dma_fini(rdev);
 		r600_irq_fini(rdev);
 		radeon_wb_fini(rdev);
 		radeon_ib_pool_fini(rdev);
@@ -3393,6 +3596,7 @@ void evergreen_fini(struct radeon_device *rdev)
 	r600_audio_fini(rdev);
 	r600_blit_fini(rdev);
 	r700_cp_fini(rdev);
+	r600_dma_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_wb_fini(rdev);
 	radeon_ib_pool_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index c042e497e450..74c6b42d2597 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -34,6 +34,8 @@
 #define MAX(a,b)                   (((a)>(b))?(a):(b))
 #define MIN(a,b)                   (((a)<(b))?(a):(b))
 
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+			   struct radeon_cs_reloc **cs_reloc);
 static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
 					  struct radeon_cs_reloc **cs_reloc);
 
@@ -507,20 +509,28 @@ static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
 		/* height is npipes htiles aligned == npipes * 8 pixel aligned */
 		nby = round_up(nby, track->npipes * 8);
 	} else {
+		/* always assume 8x8 htile */
+		/* alignment is htile alignment * 8; htile alignment varies
+		 * with the number of pipes, the tile width and nby
+		 */
 		switch (track->npipes) {
 		case 8:
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
 			nbx = round_up(nbx, 64 * 8);
 			nby = round_up(nby, 64 * 8);
 			break;
 		case 4:
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
 			nbx = round_up(nbx, 64 * 8);
 			nby = round_up(nby, 32 * 8);
 			break;
 		case 2:
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
 			nbx = round_up(nbx, 32 * 8);
 			nby = round_up(nby, 32 * 8);
 			break;
 		case 1:
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
 			nbx = round_up(nbx, 32 * 8);
 			nby = round_up(nby, 16 * 8);
 			break;
@@ -531,9 +541,10 @@ static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
 		}
 	}
 	/* compute number of htile */
-	nbx = nbx / 8;
-	nby = nby / 8;
-	size = nbx * nby * 4;
+	nbx = nbx >> 3;
+	nby = nby >> 3;
+	/* size must be aligned on npipes * 2K boundary */
+	size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
 	size += track->htile_offset;
 
 	if (size > radeon_bo_size(track->htile_bo)) {
@@ -1790,6 +1801,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	case DB_HTILE_SURFACE:
 		/* 8x8 only */
 		track->htile_surface = radeon_get_ib_value(p, idx);
+		/* force 8x8 htile width and height */
+		ib[idx] |= 3;
 		track->db_dirty = true;
 		break;
 	case CB_IMMED0_BASE:
@@ -2232,6 +2245,107 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			ib[idx+2] = upper_32_bits(offset) & 0xff;
 		}
 		break;
+	case PACKET3_CP_DMA:
+	{
+		u32 command, size, info;
+		u64 offset, tmp;
+		if (pkt->count != 4) {
+			DRM_ERROR("bad CP DMA\n");
+			return -EINVAL;
+		}
+		command = radeon_get_ib_value(p, idx+4);
+		size = command & 0x1fffff;
+		info = radeon_get_ib_value(p, idx+1);
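+		/* info DW: SRC_SEL in bits 30:29, DST_SEL in bits 21:20 */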
+		if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+		    (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+		    ((((info & 0x00300000) >> 20) == 0) &&
+		     (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+		    ((((info & 0x60000000) >> 29) == 0) &&
+		     (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+			/* non mem-to-mem copies require a dw-aligned count */
+			if (size % 4) {
+				DRM_ERROR("CP DMA command requires dw count alignment\n");
+				return -EINVAL;
+			}
+		}
+		if (command & PACKET3_CP_DMA_CMD_SAS) {
+			/* src address space is register */
+			/* GDS is ok */
+			if (((info & 0x60000000) >> 29) != 1) {
+				DRM_ERROR("CP DMA SAS not supported\n");
+				return -EINVAL;
+			}
+		} else {
+			if (command & PACKET3_CP_DMA_CMD_SAIC) {
+				DRM_ERROR("CP DMA SAIC only supported for registers\n");
+				return -EINVAL;
+			}
+			/* src address space is memory */
+			if (((info & 0x60000000) >> 29) == 0) {
+				r = evergreen_cs_packet_next_reloc(p, &reloc);
+				if (r) {
+					DRM_ERROR("bad CP DMA SRC\n");
+					return -EINVAL;
+				}
+
+				tmp = radeon_get_ib_value(p, idx) +
+					((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+				offset = reloc->lobj.gpu_offset + tmp;
+
+				if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+					dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+						 tmp + size, radeon_bo_size(reloc->robj));
+					return -EINVAL;
+				}
+
+				ib[idx] = offset;
+				ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+			} else if (((info & 0x60000000) >> 29) != 2) {
+				DRM_ERROR("bad CP DMA SRC_SEL\n");
+				return -EINVAL;
+			}
+		}
+		if (command & PACKET3_CP_DMA_CMD_DAS) {
+			/* dst address space is register */
+			/* GDS is ok */
+			if (((info & 0x00300000) >> 20) != 1) {
+				DRM_ERROR("CP DMA DAS not supported\n");
+				return -EINVAL;
+			}
+		} else {
+			/* dst address space is memory */
+			if (command & PACKET3_CP_DMA_CMD_DAIC) {
+				DRM_ERROR("CP DMA DAIC only supported for registers\n");
+				return -EINVAL;
+			}
+			if (((info & 0x00300000) >> 20) == 0) {
+				r = evergreen_cs_packet_next_reloc(p, &reloc);
+				if (r) {
+					DRM_ERROR("bad CP DMA DST\n");
+					return -EINVAL;
+				}
+
+				tmp = radeon_get_ib_value(p, idx+2) +
+					((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+				offset = reloc->lobj.gpu_offset + tmp;
+
+				if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+					dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+						 tmp + size, radeon_bo_size(reloc->robj));
+					return -EINVAL;
+				}
+
+				ib[idx+2] = offset;
+				ib[idx+3] = upper_32_bits(offset) & 0xff;
+			} else {
+				DRM_ERROR("bad CP DMA DST_SEL\n");
+				return -EINVAL;
+			}
+		}
+		break;
+	}
 	case PACKET3_SURFACE_SYNC:
 		if (pkt->count != 3) {
 			DRM_ERROR("bad SURFACE_SYNC\n");
@@ -2715,6 +2829,455 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
 	return 0;
 }
 
+/*
+ *  DMA
+ */
+
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
+#define GET_DMA_NEW(h) (((h) & 0x04000000) >> 26)
+#define GET_DMA_MISC(h) (((h) & 0x00700000) >> 20)
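+/* async DMA packet header: cmd [31:28], new [26], tiled [23],
+ * misc [22:20], count [19:0]
+ */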
+
+/**
+ * evergreen_dma_cs_parse() - parse the DMA IB
+ * @p:		parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl and updates
+ * the GPU addresses based on the reloc information and
+ * checks for errors. (Evergreen-Cayman)
+ * Returns 0 for success and an error on failure.
+ */
+int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+	struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
+	u32 header, cmd, count, tiled, new_cmd, misc;
+	volatile u32 *ib = p->ib.ptr;
+	u32 idx, idx_value;
+	u64 src_offset, dst_offset, dst2_offset;
+	int r;
+
+	do {
+		if (p->idx >= ib_chunk->length_dw) {
+			DRM_ERROR("Cannot parse packet at %d after CS end %d!\n",
+				  p->idx, ib_chunk->length_dw);
+			return -EINVAL;
+		}
+		idx = p->idx;
+		header = radeon_get_ib_value(p, idx);
+		cmd = GET_DMA_CMD(header);
+		count = GET_DMA_COUNT(header);
+		tiled = GET_DMA_T(header);
+		new_cmd = GET_DMA_NEW(header);
+		misc = GET_DMA_MISC(header);
+
+		switch (cmd) {
+		case DMA_PACKET_WRITE:
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_WRITE\n");
+				return -EINVAL;
+			}
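+			/* the packet is the data DWs plus 7 (tiled) or 3
+			 * (linear) DWs of header and addresses
+			 */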
+			if (tiled) {
+				dst_offset = ib[idx+1];
+				dst_offset <<= 8;
+
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				p->idx += count + 7;
+			} else {
+				dst_offset = ib[idx+1];
+				dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				p->idx += count + 3;
+			}
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			break;
+		case DMA_PACKET_COPY:
+			r = r600_dma_cs_next_reloc(p, &src_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_COPY\n");
+				return -EINVAL;
+			}
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_COPY\n");
+				return -EINVAL;
+			}
+			if (tiled) {
+				idx_value = radeon_get_ib_value(p, idx + 2);
+				if (new_cmd) {
+					switch (misc) {
+					case 0:
+						/* L2T, frame to fields */
+						if (idx_value & (1 << 31)) {
+							DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+							return -EINVAL;
+						}
+						r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+						if (r) {
+							DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+							return -EINVAL;
+						}
+						dst_offset = ib[idx+1];
+						dst_offset <<= 8;
+						dst2_offset = ib[idx+2];
+						dst2_offset <<= 8;
+						src_offset = ib[idx+8];
+						src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+								 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+								 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+							return -EINVAL;
+						}
+						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+						ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+						ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+						ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+						p->idx += 10;
+						break;
+					case 1:
+						/* L2T, T2L partial */
+						if (p->family < CHIP_CAYMAN) {
+							DRM_ERROR("L2T, T2L Partial is cayman only!\n");
+							return -EINVAL;
+						}
+						/* detile bit */
+						if (idx_value & (1 << 31)) {
+							/* tiled src, linear dst */
+							ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+							ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+							ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+						} else {
+							/* linear src, tiled dst */
+							ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+							ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+							ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+						}
+						p->idx += 12;
+						break;
+					case 3:
+						/* L2T, broadcast */
+						if (idx_value & (1 << 31)) {
+							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+							return -EINVAL;
+						}
+						r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+						if (r) {
+							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+							return -EINVAL;
+						}
+						dst_offset = ib[idx+1];
+						dst_offset <<= 8;
+						dst2_offset = ib[idx+2];
+						dst2_offset <<= 8;
+						src_offset = ib[idx+8];
+						src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+								 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+								 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+							return -EINVAL;
+						}
+						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+						ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+						ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+						ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+						p->idx += 10;
+						break;
+					case 4:
+						/* L2T, T2L */
+						/* detile bit */
+						if (idx_value & (1 << 31)) {
+							/* tiled src, linear dst */
+							src_offset = ib[idx+1];
+							src_offset <<= 8;
+							ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+							dst_offset = ib[idx+7];
+							dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+							ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+							ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+						} else {
+							/* linear src, tiled dst */
+							src_offset = ib[idx+7];
+							src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+							ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+							ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+							dst_offset = ib[idx+1];
+							dst_offset <<= 8;
+							ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+						}
+						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+								 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+							return -EINVAL;
+						}
+						p->idx += 9;
+						break;
+					case 5:
+						/* T2T partial */
+						if (p->family < CHIP_CAYMAN) {
+							DRM_ERROR("T2T Partial is cayman only!\n");
+							return -EINVAL;
+						}
+						ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+						ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+						p->idx += 13;
+						break;
+					case 7:
+						/* L2T, broadcast */
+						if (idx_value & (1 << 31)) {
+							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+							return -EINVAL;
+						}
+						r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+						if (r) {
+							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+							return -EINVAL;
+						}
+						dst_offset = ib[idx+1];
+						dst_offset <<= 8;
+						dst2_offset = ib[idx+2];
+						dst2_offset <<= 8;
+						src_offset = ib[idx+8];
+						src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+								 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+								 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+							return -EINVAL;
+						}
+						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+						ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+						ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+						ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+						p->idx += 10;
+						break;
+					default:
+						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+						return -EINVAL;
+					}
+				} else {
+					switch (misc) {
+					case 0:
+						/* detile bit */
+						if (idx_value & (1 << 31)) {
+							/* tiled src, linear dst */
+							src_offset = ib[idx+1];
+							src_offset <<= 8;
+							ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+							dst_offset = ib[idx+7];
+							dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+							ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+							ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+						} else {
+							/* linear src, tiled dst */
+							src_offset = ib[idx+7];
+							src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+							ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+							ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+							dst_offset = ib[idx+1];
+							dst_offset <<= 8;
+							ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+						}
+						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+								 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+							return -EINVAL;
+						}
+						p->idx += 9;
+						break;
+					default:
+						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+						return -EINVAL;
+					}
+				}
+			} else {
+				if (new_cmd) {
+					switch (misc) {
+					case 0:
+						/* L2L, byte */
+						src_offset = ib[idx+2];
+						src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+						dst_offset = ib[idx+1];
+						dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+						if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+								 src_offset + count, radeon_bo_size(src_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+								 dst_offset + count, radeon_bo_size(dst_reloc->robj));
+							return -EINVAL;
+						}
+						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+						ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+						ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+						ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+						p->idx += 5;
+						break;
+					case 1:
+						/* L2L, partial */
+						if (p->family < CHIP_CAYMAN) {
+							DRM_ERROR("L2L Partial is cayman only!\n");
+							return -EINVAL;
+						}
+						ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+						ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+						ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+						ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+
+						p->idx += 9;
+						break;
+					case 4:
+						/* L2L, dw, broadcast */
+						r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+						if (r) {
+							DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+							return -EINVAL;
+						}
+						dst_offset = ib[idx+1];
+						dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+						dst2_offset = ib[idx+2];
+						dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
+						src_offset = ib[idx+3];
+						src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+								 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+							return -EINVAL;
+						}
+						if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+							dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+								 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+							return -EINVAL;
+						}
+						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+						ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
+						ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+						ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+						ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
+						ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+						p->idx += 7;
+						break;
+					default:
+						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+						return -EINVAL;
+					}
+				} else {
+					/* L2L, dw */
+					src_offset = ib[idx+2];
+					src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+					dst_offset = ib[idx+1];
+					dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+					if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+						dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+							 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+						return -EINVAL;
+					}
+					if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+						dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+							 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+						return -EINVAL;
+					}
+					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+					ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+					p->idx += 5;
+				}
+			}
+			break;
+		case DMA_PACKET_CONSTANT_FILL:
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
+				return -EINVAL;
+			}
+			dst_offset = ib[idx+1];
+			dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+			ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+			p->idx += 4;
+			break;
+		case DMA_PACKET_NOP:
+			p->idx += 1;
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d at %d!\n", cmd, idx);
+			return -EINVAL;
+		}
+	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+	for (r = 0; r < p->ib.length_dw; r++) {
+		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
+		mdelay(1);
+	}
+#endif
+	return 0;
+}
+
 /* vm parser */
 static bool evergreen_vm_reg_valid(u32 reg)
 {
@@ -2843,6 +3406,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
 	u32 idx = pkt->idx + 1;
 	u32 idx_value = ib[idx];
 	u32 start_reg, end_reg, reg, i;
+	u32 command, info;
 
 	switch (pkt->opcode) {
 	case PACKET3_NOP:
@@ -2917,6 +3481,64 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
 				return -EINVAL;
 		}
 		break;
+	case PACKET3_CP_DMA:
+		command = ib[idx + 4];
+		info = ib[idx + 1];
+		if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+		    (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+		    ((((info & 0x00300000) >> 20) == 0) &&
+		     (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+		    ((((info & 0x60000000) >> 29) == 0) &&
+		     (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+			/* non mem-to-mem copies require a dw-aligned count */
+			if ((command & 0x1fffff) % 4) {
+				DRM_ERROR("CP DMA command requires dw count alignment\n");
+				return -EINVAL;
+			}
+		}
+		if (command & PACKET3_CP_DMA_CMD_SAS) {
+			/* src address space is register */
+			if (((info & 0x60000000) >> 29) == 0) {
+				start_reg = idx_value << 2;
+				if (command & PACKET3_CP_DMA_CMD_SAIC) {
+					reg = start_reg;
+					if (!evergreen_vm_reg_valid(reg)) {
+						DRM_ERROR("CP DMA Bad SRC register\n");
+						return -EINVAL;
+					}
+				} else {
+					for (i = 0; i < (command & 0x1fffff); i++) {
+						reg = start_reg + (4 * i);
+						if (!evergreen_vm_reg_valid(reg)) {
+							DRM_ERROR("CP DMA Bad SRC register\n");
+							return -EINVAL;
+						}
+					}
+				}
+			}
+		}
+		if (command & PACKET3_CP_DMA_CMD_DAS) {
+			/* dst address space is register */
+			if (((info & 0x00300000) >> 20) == 0) {
+				start_reg = ib[idx + 2];
+				if (command & PACKET3_CP_DMA_CMD_DAIC) {
+					reg = start_reg;
+					if (!evergreen_vm_reg_valid(reg)) {
+						DRM_ERROR("CP DMA Bad DST register\n");
+						return -EINVAL;
+					}
+				} else {
+					for (i = 0; i < (command & 0x1fffff); i++) {
+						reg = start_reg + (4 * i);
+						if (!evergreen_vm_reg_valid(reg)) {
+							DRM_ERROR("CP DMA Bad DST register\n");
+							return -EINVAL;
+						}
+					}
+				}
+			}
+		}
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -2958,3 +3580,114 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
 
 	return ret;
 }
+
+/**
+ * evergreen_dma_ib_parse() - parse the DMA IB for VM
+ * @rdev: radeon_device pointer
+ * @ib:	radeon_ib pointer
+ *
+ * Parses the DMA IB from the VM CS ioctl and
+ * checks for errors. (Cayman-SI)
+ * Returns 0 for success and an error on failure.
+ */
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	u32 idx = 0;
+	u32 header, cmd, count, tiled, new_cmd, misc;
+
+	do {
+		header = ib->ptr[idx];
+		cmd = GET_DMA_CMD(header);
+		count = GET_DMA_COUNT(header);
+		tiled = GET_DMA_T(header);
+		new_cmd = GET_DMA_NEW(header);
+		misc = GET_DMA_MISC(header);
+
+		switch (cmd) {
+		case DMA_PACKET_WRITE:
+			if (tiled)
+				idx += count + 7;
+			else
+				idx += count + 3;
+			break;
+		case DMA_PACKET_COPY:
+			if (tiled) {
+				if (new_cmd) {
+					switch (misc) {
+					case 0:
+						/* L2T, frame to fields */
+						idx += 10;
+						break;
+					case 1:
+						/* L2T, T2L partial */
+						idx += 12;
+						break;
+					case 3:
+						/* L2T, broadcast */
+						idx += 10;
+						break;
+					case 4:
+						/* L2T, T2L */
+						idx += 9;
+						break;
+					case 5:
+						/* T2T partial */
+						idx += 13;
+						break;
+					case 7:
+						/* L2T, broadcast */
+						idx += 10;
+						break;
+					default:
+						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+						return -EINVAL;
+					}
+				} else {
+					switch (misc) {
+					case 0:
+						idx += 9;
+						break;
+					default:
+						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+						return -EINVAL;
+					}
+				}
+			} else {
+				if (new_cmd) {
+					switch (misc) {
+					case 0:
+						/* L2L, byte */
+						idx += 5;
+						break;
+					case 1:
+						/* L2L, partial */
+						idx += 9;
+						break;
+					case 4:
+						/* L2L, dw, broadcast */
+						idx += 7;
+						break;
+					default:
+						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+						return -EINVAL;
+					}
+				} else {
+					/* L2L, dw */
+					idx += 5;
+				}
+			}
+			break;
+		case DMA_PACKET_CONSTANT_FILL:
+			idx += 4;
+			break;
+		case DMA_PACKET_NOP:
+			idx += 1;
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d at %d!\n", cmd, idx);
+			return -EINVAL;
+		}
+	} while (idx < ib->length_dw);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 2bc0f6a1b428..cb9baaac9e85 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -45,6 +45,8 @@
 #define TURKS_GB_ADDR_CONFIG_GOLDEN          0x02010002
 #define CEDAR_GB_ADDR_CONFIG_GOLDEN          0x02010001
 #define CAICOS_GB_ADDR_CONFIG_GOLDEN         0x02010001
+#define SUMO_GB_ADDR_CONFIG_GOLDEN           0x02010002
+#define SUMO2_GB_ADDR_CONFIG_GOLDEN          0x02010002
 
 /* Registers */
 
@@ -355,6 +357,54 @@
 #       define AFMT_MPEG_INFO_UPDATE         (1 << 10)
 #define AFMT_GENERIC0_7                      0x7138
 
+/* DCE4/5 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0        0x5f84 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1        0x5f88 /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2        0x5f8c /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3        0x5f90 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4        0x5f94 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5        0x5f98 /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6        0x5f9c /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7        0x5fa0 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8        0x5fa4 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9        0x5fa8 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10       0x5fac /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11       0x5fb0 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12       0x5fb4 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13       0x5fb8 /* WMA Pro */
+#       define MAX_CHANNELS(x)                            (((x) & 0x7) << 0)
+/* max channels minus one.  7 = 8 channels */
+#       define SUPPORTED_FREQUENCIES(x)                   (((x) & 0xff) << 8)
+#       define DESCRIPTOR_BYTE_2(x)                       (((x) & 0xff) << 16)
+#       define SUPPORTED_FREQUENCIES_STEREO(x)            (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
+#define AZ_HOT_PLUG_CONTROL                               0x5e78
+#       define AZ_FORCE_CODEC_WAKE                        (1 << 0)
+#       define PIN0_JACK_DETECTION_ENABLE                 (1 << 4)
+#       define PIN1_JACK_DETECTION_ENABLE                 (1 << 5)
+#       define PIN2_JACK_DETECTION_ENABLE                 (1 << 6)
+#       define PIN3_JACK_DETECTION_ENABLE                 (1 << 7)
+#       define PIN0_UNSOLICITED_RESPONSE_ENABLE           (1 << 8)
+#       define PIN1_UNSOLICITED_RESPONSE_ENABLE           (1 << 9)
+#       define PIN2_UNSOLICITED_RESPONSE_ENABLE           (1 << 10)
+#       define PIN3_UNSOLICITED_RESPONSE_ENABLE           (1 << 11)
+#       define CODEC_HOT_PLUG_ENABLE                      (1 << 12)
+#       define PIN0_AUDIO_ENABLED                         (1 << 24)
+#       define PIN1_AUDIO_ENABLED                         (1 << 25)
+#       define PIN2_AUDIO_ENABLED                         (1 << 26)
+#       define PIN3_AUDIO_ENABLED                         (1 << 27)
+#       define AUDIO_ENABLED                              (1 << 31)
+
 #define	GC_USER_SHADER_PIPE_CONFIG			0x8954
 #define		INACTIVE_QD_PIPES(x)				((x) << 8)
 #define		INACTIVE_QD_PIPES_MASK				0x0000FF00
@@ -651,6 +701,7 @@
 #define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
 #define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
 #define VM_CONTEXT1_CNTL				0x1414
+#define VM_CONTEXT1_CNTL2				0x1434
 #define	VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x153C
 #define	VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x157C
 #define	VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x155C
@@ -672,6 +723,8 @@
 #define		CACHE_UPDATE_MODE(x)				((x) << 6)
 #define	VM_L2_STATUS					0x140C
 #define		L2_BUSY						(1 << 0)
+#define	VM_CONTEXT1_PROTECTION_FAULT_ADDR		0x14FC
+#define	VM_CONTEXT1_PROTECTION_FAULT_STATUS		0x14DC
 
 #define	WAIT_UNTIL					0x8040
 
@@ -854,6 +907,37 @@
 #       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
 #       define DC_HPDx_EN                                 (1 << 28)
 
+/* ASYNC DMA */
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_TILING_CONFIG                                 0xd0b8
+
+#define CAYMAN_DMA1_CNTL                                  0xd82c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFFF) << 0))
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_SRBM_WRITE				  0x9
+#define	DMA_PACKET_CONSTANT_FILL			  0xd
+#define	DMA_PACKET_NOP					  0xf
+
 /* PCIE link stuff */
 #define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
 #define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
@@ -951,6 +1035,53 @@
 #define	PACKET3_WAIT_REG_MEM				0x3C
 #define	PACKET3_MEM_WRITE				0x3D
 #define	PACKET3_INDIRECT_BUFFER				0x32
+#define	PACKET3_CP_DMA					0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ *    SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
+                /* 0 - SRC_ADDR
+		 * 1 - GDS
+		 */
+#              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
+                /* 0 - ME
+		 * 1 - PFP
+		 */
+#              define PACKET3_CP_DMA_SRC_SEL(x)    ((x) << 29)
+                /* 0 - SRC_ADDR
+		 * 1 - GDS
+		 * 2 - DATA
+		 */
+#              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
 #define	PACKET3_SURFACE_SYNC				0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
 #              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index cda01f808f12..7bdbcb00aaf2 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -611,6 +611,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 	WREG32(GB_ADDR_CONFIG, gb_addr_config);
 	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
 
 	tmp = gb_addr_config & NUM_PIPES_MASK;
 	tmp = r6xx_remap_render_backend(rdev, tmp,
@@ -784,10 +786,20 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
 	/* enable context1-7 */
 	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
 	       (u32)(rdev->dummy_page.addr >> 12));
-	WREG32(VM_CONTEXT1_CNTL2, 0);
-	WREG32(VM_CONTEXT1_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL2, 4);
 	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
-				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
 	cayman_pcie_gart_tlb_flush(rdev);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -905,6 +917,7 @@ static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
 		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
 		WREG32(SCRATCH_UMSK, 0);
+		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	}
 }
 
@@ -1118,6 +1131,181 @@ static int cayman_cp_resume(struct radeon_device *rdev)
 	return 0;
 }
 
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine.  The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things.  It also
+ * has support for tiling/detiling of buffers.
+ * Cayman and newer support two asynchronous DMA engines.
+ */
+/**
+ * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (cayman-SI).
+ */
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+				struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+	if (rdev->wb.enabled) {
+		u32 next_rptr = ring->wptr + 4;
+		while ((next_rptr & 7) != 5)
+			next_rptr++;
+		next_rptr += 3;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+	 * Pad as necessary with NOPs.
+	 */
+	while ((ring->wptr & 7) != 5)
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
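+	/* the IB packet carries the VM id in bits 23:20 */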
+	radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
+	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
+/**
+ * cayman_dma_stop - stop the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines (cayman-SI).
+ */
+void cayman_dma_stop(struct radeon_device *rdev)
+{
+	u32 rb_cntl;
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+	/* dma0 */
+	rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+	rb_cntl &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
+
+	/* dma1 */
+	rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+	rb_cntl &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
+
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+}
+
+/**
+ * cayman_dma_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffers and enable them. (cayman-SI).
+ * Returns 0 for success, error for failure.
+ */
+int cayman_dma_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	u32 rb_cntl, dma_cntl;
+	u32 rb_bufsz;
+	u32 reg_offset, wb_offset;
+	int i, r;
+
+	/* Reset dma */
+	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+	RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+
+	for (i = 0; i < 2; i++) {
+		if (i == 0) {
+			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+			reg_offset = DMA0_REGISTER_OFFSET;
+			wb_offset = R600_WB_DMA_RPTR_OFFSET;
+		} else {
+			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+			reg_offset = DMA1_REGISTER_OFFSET;
+			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
+		}
+
+		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
+		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
+
+		/* Set ring buffer size in dwords */
+		rb_bufsz = drm_order(ring->ring_size / 4);
+		rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
+
+		/* Initialize the ring buffer's read and write pointers */
+		WREG32(DMA_RB_RPTR + reg_offset, 0);
+		WREG32(DMA_RB_WPTR + reg_offset, 0);
+
+		/* set the wb address whether it's enabled or not */
+		WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
+		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
+		WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
+		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+		if (rdev->wb.enabled)
+			rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+		WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
+
+		/* enable DMA IBs */
+		WREG32(DMA_IB_CNTL + reg_offset, DMA_IB_ENABLE | CMD_VMID_FORCE);
+
+		dma_cntl = RREG32(DMA_CNTL + reg_offset);
+		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+		WREG32(DMA_CNTL + reg_offset, dma_cntl);
+
+		ring->wptr = 0;
+		WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
+
+		ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
+
+		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
+
+		ring->ready = true;
+
+		r = radeon_ring_test(rdev, ring->idx, ring);
+		if (r) {
+			ring->ready = false;
+			return r;
+		}
+	}
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+	return 0;
+}
+
+/**
+ * cayman_dma_fini - tear down the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines and free the rings (cayman-SI).
+ */
+void cayman_dma_fini(struct radeon_device *rdev)
+{
+	cayman_dma_stop(rdev);
+	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
+}
+
 static int cayman_gpu_soft_reset(struct radeon_device *rdev)
 {
 	struct evergreen_mc_save save;
@@ -1208,6 +1396,32 @@ int cayman_asic_reset(struct radeon_device *rdev)
 	return cayman_gpu_soft_reset(rdev);
 }
 
+/**
+ * cayman_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (cayman-SI).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 dma_status_reg;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+	else
+		dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+	if (dma_status_reg & DMA_IDLE) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force ring activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
 static int cayman_startup(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -1289,6 +1503,18 @@ static int cayman_startup(struct radeon_device *rdev)
 		return r;
 	}
 
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
 	/* Enable IRQ */
 	r = r600_irq_init(rdev);
 	if (r) {
@@ -1303,6 +1529,23 @@ static int cayman_startup(struct radeon_device *rdev)
 			     0, 0xfffff, RADEON_CP_PACKET2);
 	if (r)
 		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+			     DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+			     DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+			     DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
+
 	r = cayman_cp_load_microcode(rdev);
 	if (r)
 		return r;
@@ -1310,6 +1553,10 @@ static int cayman_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = cayman_dma_resume(rdev);
+	if (r)
+		return r;
+
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -1354,7 +1601,7 @@ int cayman_suspend(struct radeon_device *rdev)
 {
 	r600_audio_fini(rdev);
 	cayman_cp_enable(rdev, false);
-	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+	cayman_dma_stop(rdev);
 	evergreen_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	cayman_pcie_gart_disable(rdev);
@@ -1421,6 +1668,14 @@ int cayman_init(struct radeon_device *rdev)
 	ring->ring_obj = NULL;
 	r600_ring_init(rdev, ring, 1024 * 1024);
 
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 64 * 1024);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 64 * 1024);
+
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -1433,6 +1688,7 @@ int cayman_init(struct radeon_device *rdev)
 	if (r) {
 		dev_err(rdev->dev, "disabling GPU acceleration\n");
 		cayman_cp_fini(rdev);
+		cayman_dma_fini(rdev);
 		r600_irq_fini(rdev);
 		if (rdev->flags & RADEON_IS_IGP)
 			si_rlc_fini(rdev);
@@ -1463,6 +1719,7 @@ void cayman_fini(struct radeon_device *rdev)
 {
 	r600_blit_fini(rdev);
 	cayman_cp_fini(rdev);
+	cayman_dma_fini(rdev);
 	r600_irq_fini(rdev);
 	if (rdev->flags & RADEON_IS_IGP)
 		si_rlc_fini(rdev);
@@ -1538,30 +1795,57 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
 {
 	struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
 	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+	uint64_t value;
+	unsigned ndw;
 
-	while (count) {
-		unsigned ndw = 1 + count * 2;
-		if (ndw > 0x3FFF)
-			ndw = 0x3FFF;
+	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+		while (count) {
+			ndw = 1 + count * 2;
+			if (ndw > 0x3FFF)
+				ndw = 0x3FFF;
 
-		radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
-		radeon_ring_write(ring, pe);
-		radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
-		for (; ndw > 1; ndw -= 2, --count, pe += 8) {
-			uint64_t value = 0;
-			if (flags & RADEON_VM_PAGE_SYSTEM) {
-				value = radeon_vm_map_gart(rdev, addr);
-				value &= 0xFFFFFFFFFFFFF000ULL;
-				addr += incr;
-
-			} else if (flags & RADEON_VM_PAGE_VALID) {
-				value = addr;
+			radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
+			radeon_ring_write(ring, pe);
+			radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+			for (; ndw > 1; ndw -= 2, --count, pe += 8) {
+				if (flags & RADEON_VM_PAGE_SYSTEM) {
+					value = radeon_vm_map_gart(rdev, addr);
+					value &= 0xFFFFFFFFFFFFF000ULL;
+				} else if (flags & RADEON_VM_PAGE_VALID) {
+					value = addr;
+				} else {
+					value = 0;
+				}
 				addr += incr;
+				value |= r600_flags;
+				radeon_ring_write(ring, value);
+				radeon_ring_write(ring, upper_32_bits(value));
 			}
+		}
+	} else {
+		while (count) {
+			ndw = count * 2;
+			if (ndw > 0xFFFFE)
+				ndw = 0xFFFFE;
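+			/* the DMA write count field is 20 bits wide; keep
+			 * ndw even, two DWs per page table entry
+			 */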
 
-			value |= r600_flags;
-			radeon_ring_write(ring, value);
-			radeon_ring_write(ring, upper_32_bits(value));
+			/* for non-physically contiguous pages (system) */
+			radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
+			radeon_ring_write(ring, pe);
+			radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+				if (flags & RADEON_VM_PAGE_SYSTEM) {
+					value = radeon_vm_map_gart(rdev, addr);
+					value &= 0xFFFFFFFFFFFFF000ULL;
+				} else if (flags & RADEON_VM_PAGE_VALID) {
+					value = addr;
+				} else {
+					value = 0;
+				}
+				addr += incr;
+				value |= r600_flags;
+				radeon_ring_write(ring, value);
+				radeon_ring_write(ring, upper_32_bits(value));
+			}
 		}
 	}
 }
@@ -1596,3 +1880,26 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
 	radeon_ring_write(ring, 0x0);
 }
+
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+	struct radeon_ring *ring = &rdev->ring[ridx];
+
+	if (vm == NULL)
+		return;
+
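+	/* SRBM_WRITE payload: byte enables (0xf << 16) plus the
+	 * register dword offset
+	 */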
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+	/* flush hdp cache */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+	radeon_ring_write(ring, 1);
+
+	/* bits 0-7 are the VM contexts0-7 */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+	radeon_ring_write(ring, 1 << vm->id);
+}
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index cbef6815907a..b93186b8ee4b 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -50,6 +50,24 @@
 #define		VMID(x)						(((x) & 0x7) << 0)
 #define	SRBM_STATUS				        0x0E50
 
+#define	SRBM_SOFT_RESET				        0x0E60
+#define		SOFT_RESET_BIF				(1 << 1)
+#define		SOFT_RESET_CG				(1 << 2)
+#define		SOFT_RESET_DC				(1 << 5)
+#define		SOFT_RESET_DMA1				(1 << 6)
+#define		SOFT_RESET_GRBM				(1 << 8)
+#define		SOFT_RESET_HDP				(1 << 9)
+#define		SOFT_RESET_IH				(1 << 10)
+#define		SOFT_RESET_MC				(1 << 11)
+#define		SOFT_RESET_RLC				(1 << 13)
+#define		SOFT_RESET_ROM				(1 << 14)
+#define		SOFT_RESET_SEM				(1 << 15)
+#define		SOFT_RESET_VMC				(1 << 17)
+#define		SOFT_RESET_DMA				(1 << 20)
+#define		SOFT_RESET_TST				(1 << 21)
+#define		SOFT_RESET_REGBB			(1 << 22)
+#define		SOFT_RESET_ORB				(1 << 23)
+
 #define VM_CONTEXT0_REQUEST_RESPONSE			0x1470
 #define		REQUEST_TYPE(x)					(((x) & 0xf) << 0)
 #define		RESPONSE_TYPE_MASK				0x000000F0
@@ -80,7 +98,18 @@
 #define VM_CONTEXT0_CNTL				0x1410
 #define		ENABLE_CONTEXT					(1 << 0)
 #define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 3)
 #define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT	(1 << 6)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT	(1 << 7)
+#define		PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 9)
+#define		PDE0_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 10)
+#define		VALID_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 12)
+#define		VALID_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 13)
+#define		READ_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 15)
+#define		READ_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 16)
+#define		WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 18)
+#define		WRITE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 19)
 #define VM_CONTEXT1_CNTL				0x1414
 #define VM_CONTEXT0_CNTL2				0x1430
 #define VM_CONTEXT1_CNTL2				0x1434
@@ -588,5 +617,62 @@
 #define	PACKET3_SET_APPEND_CNT			        0x75
 #define	PACKET3_ME_WRITE				0x7A
 
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET                              0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET                              0x800 /* not a register */
+
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_RB_BASE                                       0xd004
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI                               0xd01c
+#define DMA_RB_RPTR_ADDR_LO                               0xd020
+
+#define DMA_IB_CNTL                                       0xd024
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#       define CMD_VMID_FORCE                             (1 << 31)
+#define DMA_IB_RPTR                                       0xd028
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_STATUS_REG                                    0xd034
+#       define DMA_IDLE                                   (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL                     0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL                      0xd048
+#define DMA_TILING_CONFIG                                 0xd0b8
+#define DMA_MODE                                          0xd0bc
+
+#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n)	((((cmd) & 0xF) << 28) |	\
+					 (((vmid) & 0xF) << 20) |	\
+					 (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_SRBM_WRITE				  0x9
+#define	DMA_PACKET_CONSTANT_FILL			  0xd
+#define	DMA_PACKET_NOP					  0xf
+
 #endif
 
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 376884f1bcd2..8ff7cac222dc 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -4135,23 +4135,36 @@ int r100_init(struct radeon_device *rdev)
 	return 0;
 }
 
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+		      bool always_indirect)
 {
-	if (reg < rdev->rmmio_size)
+	if (reg < rdev->rmmio_size && !always_indirect)
 		return readl(((void __iomem *)rdev->rmmio) + reg);
 	else {
+		unsigned long flags;
+		uint32_t ret;
+
+		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
 		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
-		return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+		ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+		return ret;
 	}
 }
 
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+		  bool always_indirect)
 {
-	if (reg < rdev->rmmio_size)
+	if (reg < rdev->rmmio_size && !always_indirect)
 		writel(v, ((void __iomem *)rdev->rmmio) + reg);
 	else {
+		unsigned long flags;
+
+		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
 		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
 		writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
 	}
 }
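
The r100.c hunk above exists because MM_INDEX/MM_DATA access is a two-step sequence that must not interleave between contexts. A standalone analogue (a pthread mutex standing in for the new mmio_idx_lock spinlock; all names here are illustrative, not the driver's):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t bank[256];	/* stands in for the indirectly reachable registers */
static uint32_t mm_index;	/* stands in for RADEON_MM_INDEX */
static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;

static void indexed_write(uint32_t reg, uint32_t v)
{
	pthread_mutex_lock(&idx_lock);	/* index write + data access: one unit */
	mm_index = reg;			/* select the target register */
	bank[mm_index] = v;		/* write through the data port */
	pthread_mutex_unlock(&idx_lock);
}

static uint32_t indexed_read(uint32_t reg)
{
	uint32_t val;

	pthread_mutex_lock(&idx_lock);
	mm_index = reg;
	val = bank[mm_index];
	pthread_mutex_unlock(&idx_lock);
	return val;
}

int main(void)
{
	indexed_write(42, 0xDEADBEEF);
	printf("reg 42 = 0x%08X\n", indexed_read(42));
	return 0;
}

Without the lock, a second context could rewrite the index between the index write and the data access, so the read would hit the wrong register.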
 
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index cda280d157da..2aaf147969bd 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1370,6 +1370,29 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 	return radeon_ring_test_lockup(rdev, ring);
 }
 
+/**
+ * r600_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (r6xx-evergreen).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 dma_status_reg;
+
+	dma_status_reg = RREG32(DMA_STATUS_REG);
+	if (dma_status_reg & DMA_IDLE) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force ring activity */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
 int r600_asic_reset(struct radeon_device *rdev)
 {
 	return r600_gpu_soft_reset(rdev);
@@ -1424,13 +1447,7 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
 
 int r600_count_pipe_bits(uint32_t val)
 {
-	int i, ret = 0;
-
-	for (i = 0; i < 32; i++) {
-		ret += val & 1;
-		val >>= 1;
-	}
-	return ret;
+	return hweight32(val);
 }
 
 static void r600_gpu_init(struct radeon_device *rdev)
@@ -1594,6 +1611,7 @@ static void r600_gpu_init(struct radeon_device *rdev)
 	WREG32(GB_TILING_CONFIG, tiling_config);
 	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
 	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
+	WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
 
 	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
 	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
@@ -1871,6 +1889,7 @@ void r600_cp_stop(struct radeon_device *rdev)
 	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
 	WREG32(SCRATCH_UMSK, 0);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 }
 
 int r600_init_microcode(struct radeon_device *rdev)
@@ -2196,6 +2215,128 @@ void r600_cp_fini(struct radeon_device *rdev)
 	radeon_scratch_free(rdev, ring->rptr_save_reg);
 }
 
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine.  The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things.  It also
+ * has support for tiling/detiling of buffers.
+ */
+/**
+ * r600_dma_stop - stop the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine (r6xx-evergreen).
+ */
+void r600_dma_stop(struct radeon_device *rdev)
+{
+	u32 rb_cntl = RREG32(DMA_RB_CNTL);
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+	rb_cntl &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL, rb_cntl);
+
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+}
+
+/**
+ * r600_dma_resume - setup and start the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	u32 rb_cntl, dma_cntl;
+	u32 rb_bufsz;
+	int r;
+
+	/* Reset dma */
+	if (rdev->family >= CHIP_RV770)
+		WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+	else
+		WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+	RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+
+	WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
+	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+
+	/* Set ring buffer size in dwords */
+	rb_bufsz = drm_order(ring->ring_size / 4);
+	rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+	rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+	WREG32(DMA_RB_CNTL, rb_cntl);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(DMA_RB_RPTR, 0);
+	WREG32(DMA_RB_WPTR, 0);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(DMA_RB_RPTR_ADDR_HI,
+	       upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
+	WREG32(DMA_RB_RPTR_ADDR_LO,
+	       ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
+
+	if (rdev->wb.enabled)
+		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+	WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
+
+	/* enable DMA IBs */
+	WREG32(DMA_IB_CNTL, DMA_IB_ENABLE);
+
+	dma_cntl = RREG32(DMA_CNTL);
+	dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+	WREG32(DMA_CNTL, dma_cntl);
+
+	if (rdev->family >= CHIP_RV770)
+		WREG32(DMA_MODE, 1);
+
+	ring->wptr = 0;
+	WREG32(DMA_RB_WPTR, ring->wptr << 2);
+
+	ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
+
+	WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
+
+	ring->ready = true;
+
+	r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+	return 0;
+}
+
+/**
+ * r600_dma_fini - tear down the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine and free the ring (r6xx-evergreen).
+ */
+void r600_dma_fini(struct radeon_device *rdev)
+{
+	r600_dma_stop(rdev);
+	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+}
 
 /*
  * GPU scratch registers helpers function.
@@ -2252,6 +2393,64 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	return r;
 }
 
+/**
+ * r600_dma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a
+ * value to memory (r6xx-SI).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_ring_test(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	unsigned i;
+	int r;
+	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+	u32 tmp;
+
+	if (!ptr) {
+		DRM_ERROR("invalid vram scratch pointer\n");
+		return -EINVAL;
+	}
+
+	tmp = 0xCAFEDEAD;
+	writel(tmp, ptr);
+
+	r = radeon_ring_lock(rdev, ring, 4);
+	if (r) {
+		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+		return r;
+	}
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = readl(ptr);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+	} else {
+		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+			  ring->idx, tmp);
+		r = -EINVAL;
+	}
+	return r;
+}
+
+/*
+ * CP fences/semaphores
+ */
+
 void r600_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
@@ -2315,6 +2514,59 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
 }
 
+/*
+ * DMA fences/semaphores
+ */
+
+/**
+ * r600_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (r6xx-r7xx).
+ */
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+			      struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+	/* write the fence */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+	radeon_ring_write(ring, lower_32_bits(fence->seq));
+	/* generate an interrupt */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+}
+
+/**
+ * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (r6xx-SI).
+ */
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+				  struct radeon_ring *ring,
+				  struct radeon_semaphore *semaphore,
+				  bool emit_wait)
+{
+	u64 addr = semaphore->gpu_addr;
+	u32 s = emit_wait ? 0 : 1;
+
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+}
+
 int r600_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset,
 		   uint64_t dst_offset,
@@ -2334,6 +2586,80 @@ int r600_copy_blit(struct radeon_device *rdev,
 	return 0;
 }
 
+/**
+ * r600_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r6xx-r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int r600_copy_dma(struct radeon_device *rdev,
+		  uint64_t src_offset, uint64_t dst_offset,
+		  unsigned num_gpu_pages,
+		  struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_dw, cur_size_in_dw;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
+	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xffff);
+	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_dw = size_in_dw;
+		if (cur_size_in_dw > 0xFFFF)
+			cur_size_in_dw = 0xFFFF;
+		size_in_dw -= cur_size_in_dw;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, src_offset & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		src_offset += cur_size_in_dw * 4;
+		dst_offset += cur_size_in_dw * 4;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
+
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
 			 uint32_t offset, uint32_t obj_size)
@@ -2349,7 +2675,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
 
 static int r600_startup(struct radeon_device *rdev)
 {
-	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring;
 	int r;
 
 	/* enable pcie gen2 link */
@@ -2394,6 +2720,12 @@ static int r600_startup(struct radeon_device *rdev)
 		return r;
 	}
 
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
 	/* Enable IRQ */
 	r = r600_irq_init(rdev);
 	if (r) {
@@ -2403,12 +2735,20 @@ static int r600_startup(struct radeon_device *rdev)
 	}
 	r600_irq_set(rdev);
 
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
 			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
 			     0, 0xfffff, RADEON_CP_PACKET2);
-
 	if (r)
 		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_RB_RPTR, DMA_RB_WPTR,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
+
 	r = r600_cp_load_microcode(rdev);
 	if (r)
 		return r;
@@ -2416,6 +2756,10 @@ static int r600_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = r600_dma_resume(rdev);
+	if (r)
+		return r;
+
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -2471,7 +2815,7 @@ int r600_suspend(struct radeon_device *rdev)
 {
 	r600_audio_fini(rdev);
 	r600_cp_stop(rdev);
-	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+	r600_dma_stop(rdev);
 	r600_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	r600_pcie_gart_disable(rdev);
@@ -2544,6 +2888,9 @@ int r600_init(struct radeon_device *rdev)
 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -2556,6 +2903,7 @@ int r600_init(struct radeon_device *rdev)
 	if (r) {
 		dev_err(rdev->dev, "disabling GPU acceleration\n");
 		r600_cp_fini(rdev);
+		r600_dma_fini(rdev);
 		r600_irq_fini(rdev);
 		radeon_wb_fini(rdev);
 		radeon_ib_pool_fini(rdev);
@@ -2572,6 +2920,7 @@ void r600_fini(struct radeon_device *rdev)
 	r600_audio_fini(rdev);
 	r600_blit_fini(rdev);
 	r600_cp_fini(rdev);
+	r600_dma_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_wb_fini(rdev);
 	radeon_ib_pool_fini(rdev);
@@ -2674,6 +3023,104 @@ free_scratch:
 	return r;
 }
 
+/**
+ * r600_dma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (r6xx-SI).
+ * Returns 0 on success, error on failure.
+ */
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	struct radeon_ib ib;
+	unsigned i;
+	int r;
+	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+	u32 tmp = 0;
+
+	if (!ptr) {
+		DRM_ERROR("invalid vram scratch pointer\n");
+		return -EINVAL;
+	}
+
+	tmp = 0xCAFEDEAD;
+	writel(tmp, ptr);
+
+	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+	if (r) {
+		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+		return r;
+	}
+
+	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
+	ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
+	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+	ib.ptr[3] = 0xDEADBEEF;
+	ib.length_dw = 4;
+
+	r = radeon_ib_schedule(rdev, &ib, NULL);
+	if (r) {
+		radeon_ib_free(rdev, &ib);
+		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+		return r;
+	}
+	r = radeon_fence_wait(ib.fence, false);
+	if (r) {
+		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+		return r;
+	}
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = readl(ptr);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+	} else {
+		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+		r = -EINVAL;
+	}
+	radeon_ib_free(rdev, &ib);
+	return r;
+}
+
+/**
+ * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (r6xx-r7xx).
+ */
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+	if (rdev->wb.enabled) {
+		u32 next_rptr = ring->wptr + 4;
+		while ((next_rptr & 7) != 5)
+			next_rptr++;
+		next_rptr += 3;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+	 * Pad as necessary with NOPs.
+	 */
+	while ((ring->wptr & 7) != 5)
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+	radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
 /*
  * Interrupts
  *
@@ -2865,6 +3312,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
 	u32 tmp;
 
 	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+	WREG32(DMA_CNTL, tmp);
 	WREG32(GRBM_INT_CNTL, 0);
 	WREG32(DxMODE_INT_MASK, 0);
 	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
@@ -3006,6 +3455,7 @@ int r600_irq_set(struct radeon_device *rdev)
 	u32 grbm_int_cntl = 0;
 	u32 hdmi0, hdmi1;
 	u32 d1grph = 0, d2grph = 0;
+	u32 dma_cntl;
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3040,12 +3490,19 @@ int r600_irq_set(struct radeon_device *rdev)
 		hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
 		hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
 	}
+	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
 
 	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
 		DRM_DEBUG("r600_irq_set: sw int\n");
 		cp_int_cntl |= RB_INT_ENABLE;
 		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
 	}
+
+	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+		DRM_DEBUG("r600_irq_set: sw int dma\n");
+		dma_cntl |= TRAP_ENABLE;
+	}
+
 	if (rdev->irq.crtc_vblank_int[0] ||
 	    atomic_read(&rdev->irq.pflip[0])) {
 		DRM_DEBUG("r600_irq_set: vblank 0\n");
@@ -3090,6 +3547,7 @@ int r600_irq_set(struct radeon_device *rdev)
 	}
 
 	WREG32(CP_INT_CNTL, cp_int_cntl);
+	WREG32(DMA_CNTL, dma_cntl);
 	WREG32(DxMODE_INT_MASK, mode_int);
 	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
 	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
@@ -3469,6 +3927,10 @@ restart_ih:
 			DRM_DEBUG("IH: CP EOP\n");
 			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 			break;
+		case 224: /* DMA trap event */
+			DRM_DEBUG("IH: DMA trap\n");
+			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+			break;
 		case 233: /* GUI IDLE */
 			DRM_DEBUG("IH: GUI idle\n");
 			break;
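
The chunking arithmetic in r600_copy_dma() above is the part worth seeing in isolation: the copy size is converted to dwords and split into COPY packets of at most 0xFFFF dwords each, which is why the ring is locked for num_loops * 5 dwords plus fence/semaphore overhead. A standalone sketch, assuming 4K GPU pages, with printf standing in for the ring writes:

#include <stdio.h>
#include <stdint.h>

#define GPU_PAGE_SHIFT	12	/* assumes 4K GPU pages */
#define MAX_DW_PER_COPY	0xFFFF	/* COPY packet count field limit */

int main(void)
{
	unsigned int num_gpu_pages = 70;	/* arbitrary example */
	uint32_t size_in_dw = (num_gpu_pages << GPU_PAGE_SHIFT) / 4;
	unsigned int num_loops = (size_in_dw + MAX_DW_PER_COPY - 1) / MAX_DW_PER_COPY;
	unsigned int i;

	printf("%u dwords -> %u COPY packets\n", size_in_dw, num_loops);
	for (i = 0; i < num_loops; i++) {
		uint32_t cur = size_in_dw > MAX_DW_PER_COPY ? MAX_DW_PER_COPY
							    : size_in_dw;

		size_in_dw -= cur;
		printf("packet %u: %u dwords\n", i, cur);
	}
	return 0;
}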
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 2514123d2d00..be85f75aedda 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -721,12 +721,7 @@ static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
 
 static int r600_count_pipe_bits(uint32_t val)
 {
-	int i, ret = 0;
-	for (i = 0; i < 32; i++) {
-		ret += val & 1;
-		val >>= 1;
-	}
-	return ret;
+	return hweight32(val);
 }
 
 static void r600_gfx_init(struct drm_device *dev,
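
The r600_cp.c hunk above swaps an open-coded population count for hweight32(). A standalone check that the two agree, with __builtin_popcount() standing in for hweight32() in userspace:

#include <stdio.h>
#include <stdint.h>

static int popcount_loop(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}

int main(void)
{
	uint32_t v = 0xF5;	/* e.g. a pipe-enable mask */

	printf("loop=%d builtin=%d\n", popcount_loop(v), __builtin_popcount(v));
	return 0;
}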
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 211c40252fe0..0be768be530c 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -657,87 +657,30 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
 			/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
 			nby = round_up(nby, track->npipes * 8);
 		} else {
-			/* htile widht & nby (8 or 4) make 2 bits number */
-			tmp = track->htile_surface & 3;
+			/* always assume 8x8 htile */
 			/* align is htile align * 8, htile align vary according to
 			 * number of pipe and tile width and nby
 			 */
 			switch (track->npipes) {
 			case 8:
-				switch (tmp) {
-				case 3:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
-					nbx = round_up(nbx, 64 * 8);
-					nby = round_up(nby, 64 * 8);
-					break;
-				case 2:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
-				case 1:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
-					nbx = round_up(nbx, 64 * 8);
-					nby = round_up(nby, 32 * 8);
-					break;
-				case 0:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
-					nbx = round_up(nbx, 32 * 8);
-					nby = round_up(nby, 32 * 8);
-					break;
-				default:
-					return -EINVAL;
-				}
+				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+				nbx = round_up(nbx, 64 * 8);
+				nby = round_up(nby, 64 * 8);
 				break;
 			case 4:
-				switch (tmp) {
-				case 3:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
-					nbx = round_up(nbx, 64 * 8);
-					nby = round_up(nby, 32 * 8);
-					break;
-				case 2:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
-				case 1:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
-					nbx = round_up(nbx, 32 * 8);
-					nby = round_up(nby, 32 * 8);
-					break;
-				case 0:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
-					nbx = round_up(nbx, 32 * 8);
-					nby = round_up(nby, 16 * 8);
-					break;
-				default:
-					return -EINVAL;
-				}
+				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+				nbx = round_up(nbx, 64 * 8);
+				nby = round_up(nby, 32 * 8);
 				break;
 			case 2:
-				switch (tmp) {
-				case 3:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
-					nbx = round_up(nbx, 32 * 8);
-					nby = round_up(nby, 32 * 8);
-					break;
-				case 2:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
-				case 1:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
-					nbx = round_up(nbx, 32 * 8);
-					nby = round_up(nby, 16 * 8);
-					break;
-				case 0:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
-					nbx = round_up(nbx, 16 * 8);
-					nby = round_up(nby, 16 * 8);
-					break;
-				default:
-					return -EINVAL;
-				}
+				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+				nbx = round_up(nbx, 32 * 8);
+				nby = round_up(nby, 32 * 8);
 				break;
 			case 1:
-				switch (tmp) {
-				case 3:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
-					nbx = round_up(nbx, 32 * 8);
-					nby = round_up(nby, 16 * 8);
-					break;
-				case 2:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
-				case 1:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
-					nbx = round_up(nbx, 16 * 8);
-					nby = round_up(nby, 16 * 8);
-					break;
-				case 0:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
-					nbx = round_up(nbx, 16 * 8);
-					nby = round_up(nby, 8 * 8);
-					break;
-				default:
-					return -EINVAL;
-				}
+				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+				nbx = round_up(nbx, 32 * 8);
+				nby = round_up(nby, 16 * 8);
 				break;
 			default:
 				dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
@@ -746,9 +689,10 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
 			}
 		}
 		/* compute number of htile */
-		nbx = G_028D24_HTILE_WIDTH(track->htile_surface) ? nbx / 8 : nbx / 4;
-		nby = G_028D24_HTILE_HEIGHT(track->htile_surface) ? nby / 8 : nby / 4;
-		size = nbx * nby * 4;
+		nbx = nbx >> 3;
+		nby = nby >> 3;
+		/* size must be aligned on npipes * 2K boundary */
+		size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
 		size += track->htile_offset;
 
 		if (size > radeon_bo_size(track->htile_bo)) {
@@ -1492,6 +1436,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 		break;
 	case DB_HTILE_SURFACE:
 		track->htile_surface = radeon_get_ib_value(p, idx);
+		/* force 8x8 htile width and height */
+		ib[idx] |= 3;
 		track->db_dirty = true;
 		break;
 	case SQ_PGM_START_FS:
@@ -1949,6 +1895,78 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 			ib[idx+2] = upper_32_bits(offset) & 0xff;
 		}
 		break;
+	case PACKET3_CP_DMA:
+	{
+		u32 command, size;
+		u64 offset, tmp;
+		if (pkt->count != 4) {
+			DRM_ERROR("bad CP DMA\n");
+			return -EINVAL;
+		}
+		command = radeon_get_ib_value(p, idx+4);
+		size = command & 0x1fffff;
+		if (command & PACKET3_CP_DMA_CMD_SAS) {
+			/* src address space is register */
+			DRM_ERROR("CP DMA SAS not supported\n");
+			return -EINVAL;
+		} else {
+			if (command & PACKET3_CP_DMA_CMD_SAIC) {
+				DRM_ERROR("CP DMA SAIC only supported for registers\n");
+				return -EINVAL;
+			}
+			/* src address space is memory */
+			r = r600_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad CP DMA SRC\n");
+				return -EINVAL;
+			}
+
+			tmp = radeon_get_ib_value(p, idx) +
+				((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+			offset = reloc->lobj.gpu_offset + tmp;
+
+			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+				dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+					 tmp + size, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+
+			ib[idx] = offset;
+			ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+		}
+		if (command & PACKET3_CP_DMA_CMD_DAS) {
+			/* dst address space is register */
+			DRM_ERROR("CP DMA DAS not supported\n");
+			return -EINVAL;
+		} else {
+			/* dst address space is memory */
+			if (command & PACKET3_CP_DMA_CMD_DAIC) {
+				DRM_ERROR("CP DMA DAIC only supported for registers\n");
+				return -EINVAL;
+			}
+			r = r600_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("bad CP DMA DST\n");
+				return -EINVAL;
+			}
+
+			tmp = radeon_get_ib_value(p, idx+2) +
+				((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+			offset = reloc->lobj.gpu_offset + tmp;
+
+			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+				dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+					 tmp + size, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+
+			ib[idx+2] = offset;
+			ib[idx+3] = upper_32_bits(offset) & 0xff;
+		}
+		break;
+	}
 	case PACKET3_SURFACE_SYNC:
 		if (pkt->count != 3) {
 			DRM_ERROR("bad SURFACE_SYNC\n");
@@ -2496,3 +2514,196 @@ void r600_cs_legacy_init(void)
 {
 	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
 }
+
+/*
+ *  DMA
+ */
+/**
+ * r600_dma_cs_next_reloc() - parse next reloc
+ * @p:		parser structure holding parsing context.
+ * @cs_reloc:		reloc information
+ *
+ * Return the next reloc, do bo validation and compute
+ * GPU offset using the provided start.
+ **/
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+			   struct radeon_cs_reloc **cs_reloc)
+{
+	struct radeon_cs_chunk *relocs_chunk;
+	unsigned idx;
+
+	if (p->chunk_relocs_idx == -1) {
+		DRM_ERROR("No relocation chunk !\n");
+		return -EINVAL;
+	}
+	*cs_reloc = NULL;
+	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+	idx = p->dma_reloc_idx;
+	if (idx >= relocs_chunk->length_dw) {
+		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+			  idx, relocs_chunk->length_dw);
+		return -EINVAL;
+	}
+	*cs_reloc = p->relocs_ptr[idx];
+	p->dma_reloc_idx++;
+	return 0;
+}
+
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
+
+/**
+ * r600_dma_cs_parse() - parse the DMA IB
+ * @p:		parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl and updates
+ * the GPU addresses based on the reloc information and
+ * checks for errors. (R6xx-R7xx)
+ * Returns 0 for success and an error on failure.
+ **/
+int r600_dma_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+	struct radeon_cs_reloc *src_reloc, *dst_reloc;
+	u32 header, cmd, count, tiled;
+	volatile u32 *ib = p->ib.ptr;
+	u32 idx, idx_value;
+	u64 src_offset, dst_offset;
+	int r;
+
+	do {
+		if (p->idx >= ib_chunk->length_dw) {
+			DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+				  p->idx, ib_chunk->length_dw);
+			return -EINVAL;
+		}
+		idx = p->idx;
+		header = radeon_get_ib_value(p, idx);
+		cmd = GET_DMA_CMD(header);
+		count = GET_DMA_COUNT(header);
+		tiled = GET_DMA_T(header);
+
+		switch (cmd) {
+		case DMA_PACKET_WRITE:
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_WRITE\n");
+				return -EINVAL;
+			}
+			if (tiled) {
+				dst_offset = ib[idx+1];
+				dst_offset <<= 8;
+
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				p->idx += count + 5;
+			} else {
+				dst_offset = ib[idx+1];
+				dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				p->idx += count + 3;
+			}
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			break;
+		case DMA_PACKET_COPY:
+			r = r600_dma_cs_next_reloc(p, &src_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_COPY\n");
+				return -EINVAL;
+			}
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_COPY\n");
+				return -EINVAL;
+			}
+			if (tiled) {
+				idx_value = radeon_get_ib_value(p, idx + 2);
+				/* detile bit */
+				if (idx_value & (1 << 31)) {
+					/* tiled src, linear dst */
+					src_offset = ib[idx+1];
+					src_offset <<= 8;
+					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+					dst_offset = ib[idx+5];
+					dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+					ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				} else {
+					/* linear src, tiled dst */
+					src_offset = ib[idx+5];
+					src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+					ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+					dst_offset = ib[idx+1];
+					dst_offset <<= 8;
+					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				}
+				p->idx += 7;
+			} else {
+				src_offset = ib[idx+2];
+				src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+				dst_offset = ib[idx+1];
+				dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+				p->idx += 5;
+			}
+			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+				dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
+					 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+				return -EINVAL;
+			}
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
+					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			break;
+		case DMA_PACKET_CONSTANT_FILL:
+			if (p->family < CHIP_RV770) {
+				DRM_ERROR("Constant Fill is 7xx only !\n");
+				return -EINVAL;
+			}
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_WRITE\n");
+				return -EINVAL;
+			}
+			dst_offset = ib[idx+1];
+			dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+			ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+			p->idx += 4;
+			break;
+		case DMA_PACKET_NOP:
+			p->idx += 1;
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+			return -EINVAL;
+		}
+	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+	for (r = 0; r < p->ib.length_dw; r++) {
+		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
+		mdelay(1);
+	}
+#endif
+	return 0;
+}
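
The parser above drives everything off three header fields. A standalone sketch of the decode (the GET_DMA_* macros are copied from r600_cs.c; the example header value is illustrative):

#include <stdio.h>
#include <stdint.h>

#define GET_DMA_CMD(h)		(((h) & 0xf0000000) >> 28)
#define GET_DMA_COUNT(h)	((h) & 0x0000ffff)
#define GET_DMA_T(h)		(((h) & 0x00800000) >> 23)

int main(void)
{
	/* a tiled WRITE of 8 dwords: cmd = 0x2, t = 1, count = 8 */
	uint32_t header = (0x2u << 28) | (1u << 23) | 8;

	printf("cmd=0x%X tiled=%u count=%u\n",
	       GET_DMA_CMD(header), GET_DMA_T(header), GET_DMA_COUNT(header));
	return 0;
}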
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 2b960cb5c18a..909219b1bf80 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -96,6 +96,15 @@
 #define R600_CONFIG_F0_BASE                                     0x542C
 #define R600_CONFIG_APER_SIZE                                   0x5430
 
+#define	R600_BIF_FB_EN						0x5490
+#define		R600_FB_READ_EN					(1 << 0)
+#define		R600_FB_WRITE_EN				(1 << 1)
+
+#define R600_CITF_CNTL           				0x200c
+#define		R600_BLACKOUT_MASK				0x00000003
+
+#define R700_MC_CITF_CNTL           				0x25c0
+
 #define R600_ROM_CNTL                              0x1600
 #       define R600_SCK_OVERWRITE                  (1 << 1)
 #       define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index fa6f37099ba9..4a53402b1852 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -590,9 +590,59 @@
 #define         WAIT_2D_IDLECLEAN_bit                           (1 << 16)
 #define         WAIT_3D_IDLECLEAN_bit                           (1 << 17)
 
+/* async DMA */
+#define DMA_TILING_CONFIG                                 0x3ec4
+#define DMA_CONFIG                                        0x3e4c
+
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_RB_BASE                                       0xd004
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI                               0xd01c
+#define DMA_RB_RPTR_ADDR_LO                               0xd020
+
+#define DMA_IB_CNTL                                       0xd024
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#define DMA_IB_RPTR                                       0xd028
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_STATUS_REG                                    0xd034
+#       define DMA_IDLE                                   (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL                     0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL                      0xd048
+#define DMA_MODE                                          0xd0bc
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_CONSTANT_FILL			  0xd /* 7xx only */
+#define	DMA_PACKET_NOP					  0xf
+
 #define IH_RB_CNTL                                        0x3e00
 #       define IH_RB_ENABLE                               (1 << 0)
-#       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_SIZE(x)                              ((x) << 1) /* log2 */
 #       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
 #       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
 #       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
@@ -637,7 +687,9 @@
 #define TN_RLC_CLEAR_STATE_RESTORE_BASE                   0x3f20
 
 #define SRBM_SOFT_RESET                                   0xe60
+#       define SOFT_RESET_DMA                             (1 << 12)
 #       define SOFT_RESET_RLC                             (1 << 13)
+#       define RV770_SOFT_RESET_DMA                       (1 << 20)
 
 #define CP_INT_CNTL                                       0xc124
 #       define CNTX_BUSY_INT_ENABLE                       (1 << 19)
@@ -1134,6 +1186,38 @@
 #define	PACKET3_WAIT_REG_MEM				0x3C
 #define	PACKET3_MEM_WRITE				0x3D
 #define	PACKET3_INDIRECT_BUFFER				0x32
+#define	PACKET3_CP_DMA					0x41
+/* 1. header
+ * 2. SRC_ADDR_LO [31:0]
+ * 3. CP_SYNC [31] | SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
 #define	PACKET3_SURFACE_SYNC				0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
 #              define PACKET3_TC_ACTION_ENA        (1 << 23)
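
The dword layout documented above for PACKET3_CP_DMA can be made concrete by assembling the five payload dwords that follow the header. Only the field positions come from the comments above; the addresses and byte count are made-up example values:

#include <stdio.h>
#include <stdint.h>

#define PACKET3_CP_DMA_CP_SYNC	(1u << 31)

int main(void)
{
	uint64_t src = 0xAB12345678ULL;	/* example addresses */
	uint64_t dst = 0xCD9ABCDEF0ULL;
	uint32_t byte_count = 0x1000;	/* must fit BYTE_COUNT [20:0] */
	uint32_t pkt[5];
	int i;

	pkt[0] = (uint32_t)src;					/* SRC_ADDR_LO */
	pkt[1] = (uint32_t)(PACKET3_CP_DMA_CP_SYNC |
			    ((src >> 32) & 0xff));		/* CP_SYNC | SRC_ADDR_HI */
	pkt[2] = (uint32_t)dst;					/* DST_ADDR_LO */
	pkt[3] = (uint32_t)((dst >> 32) & 0xff);		/* DST_ADDR_HI */
	pkt[4] = byte_count & 0x1fffff;				/* COMMAND = 0 | BYTE_COUNT */

	for (i = 0; i < 5; i++)
		printf("dw%d = 0x%08X\n", i + 2, pkt[i]);	/* dw1 is the header */
	return 0;
}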
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8c42d54c2e26..5dc744d43d12 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -109,7 +109,7 @@ extern int radeon_lockup_timeout;
 #define RADEON_BIOS_NUM_SCRATCH			8
 
 /* max number of rings */
-#define RADEON_NUM_RINGS			3
+#define RADEON_NUM_RINGS			5
 
 /* fence seq are set to this number when signaled */
 #define RADEON_FENCE_SIGNALED_SEQ		0LL
@@ -122,6 +122,11 @@ extern int radeon_lockup_timeout;
 #define CAYMAN_RING_TYPE_CP1_INDEX		1
 #define CAYMAN_RING_TYPE_CP2_INDEX		2
 
+/* R600+ has an async dma ring */
+#define R600_RING_TYPE_DMA_INDEX		3
+/* cayman adds a second async dma ring */
+#define CAYMAN_RING_TYPE_DMA1_INDEX		4
+
 /* hardcode those limit for now */
 #define RADEON_VA_IB_OFFSET			(1 << 20)
 #define RADEON_VA_RESERVED_SIZE			(8 << 20)
@@ -313,6 +318,7 @@ struct radeon_bo {
 	struct list_head		list;
 	/* Protected by tbo.reserved */
 	u32				placements[3];
+	u32				busy_placements[3];
 	struct ttm_placement		placement;
 	struct ttm_buffer_object	tbo;
 	struct ttm_bo_kmap_obj		kmap;
@@ -787,6 +793,15 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigne
 void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
 
 
+/* r600 async dma */
+void r600_dma_stop(struct radeon_device *rdev);
+int r600_dma_resume(struct radeon_device *rdev);
+void r600_dma_fini(struct radeon_device *rdev);
+
+void cayman_dma_stop(struct radeon_device *rdev);
+int cayman_dma_resume(struct radeon_device *rdev);
+void cayman_dma_fini(struct radeon_device *rdev);
+
 /*
  * CS.
  */
@@ -824,6 +839,7 @@ struct radeon_cs_parser {
 	struct radeon_cs_reloc	*relocs;
 	struct radeon_cs_reloc	**relocs_ptr;
 	struct list_head	validated;
+	unsigned		dma_reloc_idx;
 	/* indices of various chunks */
 	int			chunk_ib_idx;
 	int			chunk_relocs_idx;
@@ -883,7 +899,9 @@ struct radeon_wb {
 #define RADEON_WB_CP_RPTR_OFFSET 1024
 #define RADEON_WB_CP1_RPTR_OFFSET 1280
 #define RADEON_WB_CP2_RPTR_OFFSET 1536
+#define R600_WB_DMA_RPTR_OFFSET   1792
 #define R600_WB_IH_WPTR_OFFSET   2048
+#define CAYMAN_WB_DMA1_RPTR_OFFSET   2304
 #define R600_WB_EVENT_OFFSET     3072
 
 /**
@@ -1539,6 +1557,8 @@ struct radeon_device {
 	/* Register mmio */
 	resource_size_t			rmmio_base;
 	resource_size_t			rmmio_size;
+	/* protects concurrent MM_INDEX/DATA based register access */
+	spinlock_t mmio_idx_lock;
 	void __iomem			*rmmio;
 	radeon_rreg_t			mc_rreg;
 	radeon_wreg_t			mc_wreg;
@@ -1614,8 +1634,10 @@ int radeon_device_init(struct radeon_device *rdev,
 void radeon_device_fini(struct radeon_device *rdev);
 int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
 
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+		      bool always_indirect);
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+		  bool always_indirect);
 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
 void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
 
@@ -1631,9 +1653,11 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
 #define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
 #define RREG16(reg) readw((rdev->rmmio) + (reg))
 #define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
-#define RREG32(reg) r100_mm_rreg(rdev, (reg))
-#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
-#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
+#define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
+#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg), false))
+#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false)
+#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true)
 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
@@ -1658,7 +1682,7 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
 		tmp_ |= ((val) & ~(mask));			\
 		WREG32_PLL(reg, tmp_);				\
 	} while (0)
-#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))
+#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
 #define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
 #define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
 
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 654520b95ab7..596bcbe80ed0 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -947,6 +947,15 @@ static struct radeon_asic r600_asic = {
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
 			.is_lockup = &r600_gpu_is_lockup,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &r600_dma_ring_ib_execute,
+			.emit_fence = &r600_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &r600_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &r600_dma_is_lockup,
 		}
 	},
 	.irq = {
@@ -963,10 +972,10 @@ static struct radeon_asic r600_asic = {
 	.copy = {
 		.blit = &r600_copy_blit,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.dma = NULL,
-		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.copy = &r600_copy_blit,
-		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r600_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &r600_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
 	},
 	.surface = {
 		.set_reg = r600_set_surface_reg,
@@ -1022,6 +1031,15 @@ static struct radeon_asic rs780_asic = {
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
 			.is_lockup = &r600_gpu_is_lockup,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &r600_dma_ring_ib_execute,
+			.emit_fence = &r600_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &r600_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &r600_dma_is_lockup,
 		}
 	},
 	.irq = {
@@ -1038,10 +1056,10 @@ static struct radeon_asic rs780_asic = {
 	.copy = {
 		.blit = &r600_copy_blit,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.dma = NULL,
-		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.copy = &r600_copy_blit,
-		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r600_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &r600_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
 	},
 	.surface = {
 		.set_reg = r600_set_surface_reg,
@@ -1097,6 +1115,15 @@ static struct radeon_asic rv770_asic = {
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
 			.is_lockup = &r600_gpu_is_lockup,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &r600_dma_ring_ib_execute,
+			.emit_fence = &r600_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &r600_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &r600_dma_is_lockup,
 		}
 	},
 	.irq = {
@@ -1113,10 +1140,10 @@ static struct radeon_asic rv770_asic = {
 	.copy = {
 		.blit = &r600_copy_blit,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.dma = NULL,
-		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.copy = &r600_copy_blit,
-		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r600_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &r600_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
 	},
 	.surface = {
 		.set_reg = r600_set_surface_reg,
@@ -1172,6 +1199,15 @@ static struct radeon_asic evergreen_asic = {
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
 			.is_lockup = &evergreen_gpu_is_lockup,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &evergreen_dma_ring_ib_execute,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &r600_dma_is_lockup,
 		}
 	},
 	.irq = {
@@ -1188,10 +1224,10 @@ static struct radeon_asic evergreen_asic = {
 	.copy = {
 		.blit = &r600_copy_blit,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.dma = NULL,
-		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.copy = &r600_copy_blit,
-		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
 	},
 	.surface = {
 		.set_reg = r600_set_surface_reg,
@@ -1248,6 +1284,15 @@ static struct radeon_asic sumo_asic = {
 			.ib_test = &r600_ib_test,
 			.is_lockup = &evergreen_gpu_is_lockup,
 		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &evergreen_dma_ring_ib_execute,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &r600_dma_is_lockup,
+		}
 	},
 	.irq = {
 		.set = &evergreen_irq_set,
@@ -1263,10 +1308,10 @@ static struct radeon_asic sumo_asic = {
 	.copy = {
 		.blit = &r600_copy_blit,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.dma = NULL,
-		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.copy = &r600_copy_blit,
-		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
 	},
 	.surface = {
 		.set_reg = r600_set_surface_reg,
@@ -1322,6 +1367,15 @@ static struct radeon_asic btc_asic = {
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
 			.is_lockup = &evergreen_gpu_is_lockup,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &evergreen_dma_ring_ib_execute,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &r600_dma_is_lockup,
 		}
 	},
 	.irq = {
@@ -1338,10 +1392,10 @@ static struct radeon_asic btc_asic = {
 	.copy = {
 		.blit = &r600_copy_blit,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.dma = NULL,
-		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.copy = &r600_copy_blit,
-		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
 	},
 	.surface = {
 		.set_reg = r600_set_surface_reg,
@@ -1391,7 +1445,7 @@ static struct radeon_asic cayman_asic = {
 	.vm = {
 		.init = &cayman_vm_init,
 		.fini = &cayman_vm_fini,
-		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
 		.set_page = &cayman_vm_set_page,
 	},
 	.ring = {
@@ -1427,6 +1481,28 @@ static struct radeon_asic cayman_asic = {
 			.ib_test = &r600_ib_test,
 			.is_lockup = &evergreen_gpu_is_lockup,
 			.vm_flush = &cayman_vm_flush,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &cayman_dma_ring_ib_execute,
+			.ib_parse = &evergreen_dma_ib_parse,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &cayman_dma_is_lockup,
+			.vm_flush = &cayman_dma_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = {
+			.ib_execute = &cayman_dma_ring_ib_execute,
+			.ib_parse = &evergreen_dma_ib_parse,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &cayman_dma_is_lockup,
+			.vm_flush = &cayman_dma_vm_flush,
 		}
 	},
 	.irq = {
@@ -1443,10 +1519,10 @@ static struct radeon_asic cayman_asic = {
 	.copy = {
 		.blit = &r600_copy_blit,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.dma = NULL,
-		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.copy = &r600_copy_blit,
-		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
 	},
 	.surface = {
 		.set_reg = r600_set_surface_reg,
@@ -1496,7 +1572,7 @@ static struct radeon_asic trinity_asic = {
 	.vm = {
 		.init = &cayman_vm_init,
 		.fini = &cayman_vm_fini,
-		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
 		.set_page = &cayman_vm_set_page,
 	},
 	.ring = {
@@ -1532,6 +1608,28 @@ static struct radeon_asic trinity_asic = {
 			.ib_test = &r600_ib_test,
 			.is_lockup = &evergreen_gpu_is_lockup,
 			.vm_flush = &cayman_vm_flush,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &cayman_dma_ring_ib_execute,
+			.ib_parse = &evergreen_dma_ib_parse,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &cayman_dma_is_lockup,
+			.vm_flush = &cayman_dma_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = {
+			.ib_execute = &cayman_dma_ring_ib_execute,
+			.ib_parse = &evergreen_dma_ib_parse,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &cayman_dma_is_lockup,
+			.vm_flush = &cayman_dma_vm_flush,
 		}
 	},
 	.irq = {
@@ -1548,10 +1646,10 @@ static struct radeon_asic trinity_asic = {
 	.copy = {
 		.blit = &r600_copy_blit,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.dma = NULL,
-		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.copy = &r600_copy_blit,
-		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
 	},
 	.surface = {
 		.set_reg = r600_set_surface_reg,
@@ -1601,7 +1699,7 @@ static struct radeon_asic si_asic = {
 	.vm = {
 		.init = &si_vm_init,
 		.fini = &si_vm_fini,
-		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
 		.set_page = &si_vm_set_page,
 	},
 	.ring = {
@@ -1637,6 +1735,28 @@ static struct radeon_asic si_asic = {
 			.ib_test = &r600_ib_test,
 			.is_lockup = &si_gpu_is_lockup,
 			.vm_flush = &si_vm_flush,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &cayman_dma_ring_ib_execute,
+			.ib_parse = &evergreen_dma_ib_parse,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &cayman_dma_is_lockup,
+			.vm_flush = &si_dma_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = {
+			.ib_execute = &cayman_dma_ring_ib_execute,
+			.ib_parse = &evergreen_dma_ib_parse,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &cayman_dma_is_lockup,
+			.vm_flush = &si_dma_vm_flush,
 		}
 	},
 	.irq = {
@@ -1653,10 +1773,10 @@ static struct radeon_asic si_asic = {
 	.copy = {
 		.blit = NULL,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.dma = NULL,
-		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.copy = NULL,
-		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &si_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &si_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
 	},
 	.surface = {
 		.set_reg = r600_set_surface_reg,
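
The radeon_asic.c hunks above all follow one pattern: each ring index selects a struct of function pointers, so the core can drive the GFX and async DMA engines through the same interface. A minimal standalone sketch of that dispatch shape (names are illustrative, not the driver's):

#include <stdio.h>

#define RING_GFX	0
#define RING_DMA	3
#define NUM_RINGS	5

struct ring_ops {
	int (*ring_test)(int ring);	/* one slot per hook in the real table */
};

static int gfx_ring_test(int ring)
{
	printf("gfx ring test on %d\n", ring);
	return 0;
}

static int dma_ring_test(int ring)
{
	printf("dma ring test on %d\n", ring);
	return 0;
}

/* per-ring ops, indexed by ring number like the tables above */
static const struct ring_ops ops[NUM_RINGS] = {
	[RING_GFX] = { .ring_test = gfx_ring_test },
	[RING_DMA] = { .ring_test = dma_ring_test },
};

int main(void)
{
	/* the core calls through the table without knowing the engine type */
	if (ops[RING_DMA].ring_test)
		ops[RING_DMA].ring_test(RING_DMA);
	return 0;
}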
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 5e3a0e5c6be1..5f4882cc2152 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -263,6 +263,7 @@ extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
 struct rv515_mc_save {
 	u32 vga_render_control;
 	u32 vga_hdp_control;
+	bool crtc_enabled[2];
 };
 
 int rv515_init(struct radeon_device *rdev);
@@ -303,12 +304,21 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
 uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
 void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int r600_cs_parse(struct radeon_cs_parser *p);
+int r600_dma_cs_parse(struct radeon_cs_parser *p);
 void r600_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence);
 void r600_semaphore_ring_emit(struct radeon_device *rdev,
 			      struct radeon_ring *cp,
 			      struct radeon_semaphore *semaphore,
 			      bool emit_wait);
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+			      struct radeon_fence *fence);
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+				  struct radeon_ring *ring,
+				  struct radeon_semaphore *semaphore,
+				  bool emit_wait);
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int r600_asic_reset(struct radeon_device *rdev);
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
@@ -316,11 +326,16 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t offset, uint32_t obj_size);
 void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
 int r600_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset, uint64_t dst_offset,
 		   unsigned num_gpu_pages, struct radeon_fence **fence);
+int r600_copy_dma(struct radeon_device *rdev,
+		  uint64_t src_offset, uint64_t dst_offset,
+		  unsigned num_gpu_pages, struct radeon_fence **fence);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -416,6 +431,7 @@ u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
 int evergreen_irq_set(struct radeon_device *rdev);
 int evergreen_irq_process(struct radeon_device *rdev);
 extern int evergreen_cs_parse(struct radeon_cs_parser *p);
+extern int evergreen_dma_cs_parse(struct radeon_cs_parser *p);
 extern void evergreen_pm_misc(struct radeon_device *rdev);
 extern void evergreen_pm_prepare(struct radeon_device *rdev);
 extern void evergreen_pm_finish(struct radeon_device *rdev);
@@ -428,6 +444,14 @@ extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
 void evergreen_disable_interrupt_state(struct radeon_device *rdev);
 int evergreen_blit_init(struct radeon_device *rdev);
 int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+				   struct radeon_fence *fence);
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+				   struct radeon_ib *ib);
+int evergreen_copy_dma(struct radeon_device *rdev,
+		       uint64_t src_offset, uint64_t dst_offset,
+		       unsigned num_gpu_pages,
+		       struct radeon_fence **fence);
 
 /*
  * cayman
@@ -449,6 +473,11 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
 			uint64_t addr, unsigned count,
 			uint32_t incr, uint32_t flags);
 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+				struct radeon_ib *ib);
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
 /* DCE6 - SI */
 void dce6_bandwidth_update(struct radeon_device *rdev);
@@ -476,5 +505,10 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
 void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 uint64_t si_get_gpu_clock(struct radeon_device *rdev);
+int si_copy_dma(struct radeon_device *rdev,
+		uint64_t src_offset, uint64_t dst_offset,
+		unsigned num_gpu_pages,
+		struct radeon_fence **fence);
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 45b660b27cfc..4af89126e223 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3246,11 +3246,9 @@ static uint32_t combios_detect_ram(struct drm_device *dev, int ram,
 	while (ram--) {
 		addr = ram * 1024 * 1024;
 		/* write to each page */
-		WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
-		WREG32(RADEON_MM_DATA, 0xdeadbeef);
+		WREG32_IDX((addr) | RADEON_MM_APER, 0xdeadbeef);
 		/* read back and verify */
-		WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
-		if (RREG32(RADEON_MM_DATA) != 0xdeadbeef)
+		if (RREG32_IDX((addr) | RADEON_MM_APER) != 0xdeadbeef)
 			return 0;
 	}
 
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index b884c362a8c2..47bf162ab9c6 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1599,7 +1599,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 			connector->interlace_allowed = true;
 			connector->doublescan_allowed = true;
 			radeon_connector->dac_load_detect = true;
-			drm_connector_attach_property(&radeon_connector->base,
+			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.load_detect_property,
 						      1);
 			break;
@@ -1608,13 +1608,13 @@ radeon_add_atom_connector(struct drm_device *dev,
 		case DRM_MODE_CONNECTOR_HDMIA:
 		case DRM_MODE_CONNECTOR_HDMIB:
 		case DRM_MODE_CONNECTOR_DisplayPort:
-			drm_connector_attach_property(&radeon_connector->base,
+			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.underscan_property,
 						      UNDERSCAN_OFF);
-			drm_connector_attach_property(&radeon_connector->base,
+			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.underscan_hborder_property,
 						      0);
-			drm_connector_attach_property(&radeon_connector->base,
+			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.underscan_vborder_property,
 						      0);
 			subpixel_order = SubPixelHorizontalRGB;
@@ -1625,14 +1625,14 @@ radeon_add_atom_connector(struct drm_device *dev,
 				connector->doublescan_allowed = false;
 			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
 				radeon_connector->dac_load_detect = true;
-				drm_connector_attach_property(&radeon_connector->base,
+				drm_object_attach_property(&radeon_connector->base.base,
 							      rdev->mode_info.load_detect_property,
 							      1);
 			}
 			break;
 		case DRM_MODE_CONNECTOR_LVDS:
 		case DRM_MODE_CONNECTOR_eDP:
-			drm_connector_attach_property(&radeon_connector->base,
+			drm_object_attach_property(&radeon_connector->base.base,
 						      dev->mode_config.scaling_mode_property,
 						      DRM_MODE_SCALE_FULLSCREEN);
 			subpixel_order = SubPixelHorizontalRGB;
@@ -1651,7 +1651,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 					DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 			}
 			radeon_connector->dac_load_detect = true;
-			drm_connector_attach_property(&radeon_connector->base,
+			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.load_detect_property,
 						      1);
 			/* no HPD on analog connectors */
@@ -1669,7 +1669,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 					DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 			}
 			radeon_connector->dac_load_detect = true;
-			drm_connector_attach_property(&radeon_connector->base,
+			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.load_detect_property,
 						      1);
 			/* no HPD on analog connectors */
@@ -1692,23 +1692,23 @@ radeon_add_atom_connector(struct drm_device *dev,
 					DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 			}
 			subpixel_order = SubPixelHorizontalRGB;
-			drm_connector_attach_property(&radeon_connector->base,
+			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.coherent_mode_property,
 						      1);
 			if (ASIC_IS_AVIVO(rdev)) {
-				drm_connector_attach_property(&radeon_connector->base,
+				drm_object_attach_property(&radeon_connector->base.base,
 							      rdev->mode_info.underscan_property,
 							      UNDERSCAN_OFF);
-				drm_connector_attach_property(&radeon_connector->base,
+				drm_object_attach_property(&radeon_connector->base.base,
 							      rdev->mode_info.underscan_hborder_property,
 							      0);
-				drm_connector_attach_property(&radeon_connector->base,
+				drm_object_attach_property(&radeon_connector->base.base,
 							      rdev->mode_info.underscan_vborder_property,
 							      0);
 			}
 			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
 				radeon_connector->dac_load_detect = true;
-				drm_connector_attach_property(&radeon_connector->base,
+				drm_object_attach_property(&radeon_connector->base.base,
 							      rdev->mode_info.load_detect_property,
 							      1);
 			}
@@ -1732,17 +1732,17 @@ radeon_add_atom_connector(struct drm_device *dev,
 				if (!radeon_connector->ddc_bus)
 					DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 			}
-			drm_connector_attach_property(&radeon_connector->base,
+			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.coherent_mode_property,
 						      1);
 			if (ASIC_IS_AVIVO(rdev)) {
-				drm_connector_attach_property(&radeon_connector->base,
+				drm_object_attach_property(&radeon_connector->base.base,
 							      rdev->mode_info.underscan_property,
 							      UNDERSCAN_OFF);
-				drm_connector_attach_property(&radeon_connector->base,
+				drm_object_attach_property(&radeon_connector->base.base,
 							      rdev->mode_info.underscan_hborder_property,
 							      0);
-				drm_connector_attach_property(&radeon_connector->base,
+				drm_object_attach_property(&radeon_connector->base.base,
 							      rdev->mode_info.underscan_vborder_property,
 							      0);
 			}
@@ -1771,17 +1771,17 @@ radeon_add_atom_connector(struct drm_device *dev,
 					DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 			}
 			subpixel_order = SubPixelHorizontalRGB;
-			drm_connector_attach_property(&radeon_connector->base,
+			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.coherent_mode_property,
 						      1);
 			if (ASIC_IS_AVIVO(rdev)) {
-				drm_connector_attach_property(&radeon_connector->base,
+				drm_object_attach_property(&radeon_connector->base.base,
 							      rdev->mode_info.underscan_property,
 							      UNDERSCAN_OFF);
-				drm_connector_attach_property(&radeon_connector->base,
+				drm_object_attach_property(&radeon_connector->base.base,
 							      rdev->mode_info.underscan_hborder_property,
 							      0);
-				drm_connector_attach_property(&radeon_connector->base,
+				drm_object_attach_property(&radeon_connector->base.base,
 							      rdev->mode_info.underscan_vborder_property,
 							      0);
 			}
@@ -1806,7 +1806,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 				if (!radeon_connector->ddc_bus)
 					DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 			}
-			drm_connector_attach_property(&radeon_connector->base,
+			drm_object_attach_property(&radeon_connector->base.base,
 						      dev->mode_config.scaling_mode_property,
 						      DRM_MODE_SCALE_FULLSCREEN);
 			subpixel_order = SubPixelHorizontalRGB;
@@ -1819,10 +1819,10 @@ radeon_add_atom_connector(struct drm_device *dev,
 			drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
 			drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
 			radeon_connector->dac_load_detect = true;
-			drm_connector_attach_property(&radeon_connector->base,
+			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.load_detect_property,
 						      1);
-			drm_connector_attach_property(&radeon_connector->base,
+			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.tv_std_property,
 						      radeon_atombios_get_tv_info(rdev));
 			/* no HPD on analog connectors */
@@ -1843,7 +1843,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 				if (!radeon_connector->ddc_bus)
 					DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 			}
-			drm_connector_attach_property(&radeon_connector->base,
+			drm_object_attach_property(&radeon_connector->base.base,
 						      dev->mode_config.scaling_mode_property,
 						      DRM_MODE_SCALE_FULLSCREEN);
 			subpixel_order = SubPixelHorizontalRGB;
@@ -1922,7 +1922,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
 				DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 		}
 		radeon_connector->dac_load_detect = true;
-		drm_connector_attach_property(&radeon_connector->base,
+		drm_object_attach_property(&radeon_connector->base.base,
 					      rdev->mode_info.load_detect_property,
 					      1);
 		/* no HPD on analog connectors */
@@ -1940,7 +1940,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
 				DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 		}
 		radeon_connector->dac_load_detect = true;
-		drm_connector_attach_property(&radeon_connector->base,
+		drm_object_attach_property(&radeon_connector->base.base,
 					      rdev->mode_info.load_detect_property,
 					      1);
 		/* no HPD on analog connectors */
@@ -1959,7 +1959,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
 		}
 		if (connector_type == DRM_MODE_CONNECTOR_DVII) {
 			radeon_connector->dac_load_detect = true;
-			drm_connector_attach_property(&radeon_connector->base,
+			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.load_detect_property,
 						      1);
 		}
@@ -1983,10 +1983,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
 		 */
 		if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
 			radeon_connector->dac_load_detect = false;
-		drm_connector_attach_property(&radeon_connector->base,
+		drm_object_attach_property(&radeon_connector->base.base,
 					      rdev->mode_info.load_detect_property,
 					      radeon_connector->dac_load_detect);
-		drm_connector_attach_property(&radeon_connector->base,
+		drm_object_attach_property(&radeon_connector->base.base,
 					      rdev->mode_info.tv_std_property,
 					      radeon_combios_get_tv_info(rdev));
 		/* no HPD on analog connectors */
@@ -2002,7 +2002,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
 			if (!radeon_connector->ddc_bus)
 				DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 		}
-		drm_connector_attach_property(&radeon_connector->base,
+		drm_object_attach_property(&radeon_connector->base.base,
 					      dev->mode_config.scaling_mode_property,
 					      DRM_MODE_SCALE_FULLSCREEN);
 		subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 8b2797dc7b64..9143fc45e35b 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -116,20 +116,6 @@ u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index)
 	}
 }
 
-u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr)
-{
-	u32 ret;
-
-	if (addr < 0x10000)
-		ret = DRM_READ32(dev_priv->mmio, addr);
-	else {
-		DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, addr);
-		ret = DRM_READ32(dev_priv->mmio, RADEON_MM_DATA);
-	}
-
-	return ret;
-}
-
 static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
 {
 	u32 ret;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 41672cc563fb..396baba0141a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -43,6 +43,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		return 0;
 	}
 	chunk = &p->chunks[p->chunk_relocs_idx];
+	p->dma_reloc_idx = 0;
 	/* FIXME: we assume that each reloc uses 4 dwords */
 	p->nrelocs = chunk->length_dw / 4;
 	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
@@ -111,6 +112,18 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
 		} else
 			p->ring = RADEON_RING_TYPE_GFX_INDEX;
 		break;
+	case RADEON_CS_RING_DMA:
+		if (p->rdev->family >= CHIP_CAYMAN) {
+			if (p->priority > 0)
+				p->ring = R600_RING_TYPE_DMA_INDEX;
+			else
+				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
+		} else if (p->rdev->family >= CHIP_R600) {
+			p->ring = R600_RING_TYPE_DMA_INDEX;
+		} else {
+			return -EINVAL;
+		}
+		break;
 	}
 	return 0;
 }
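
The hunk above routes the new RADEON_CS_RING_DMA request onto the async DMA engines: Cayman and newer carry two engines and split work by submission priority, r600 through Northern Islands have a single engine, and older parts reject the request. A minimal standalone sketch of that decision table (the enum values below are illustrative stand-ins, not the kernel's):

#include <stdio.h>

/* Illustrative stand-ins for the kernel's family and ring indices. */
enum family { CHIP_R600 = 10, CHIP_CAYMAN = 20 };
enum ring   { RING_DMA0 = 0, RING_DMA1 = 1, RING_NONE = -1 };

static int pick_dma_ring(enum family f, int priority)
{
	if (f >= CHIP_CAYMAN)
		/* two DMA engines: priority > 0 lands on DMA0 */
		return priority > 0 ? RING_DMA0 : RING_DMA1;
	if (f >= CHIP_R600)
		return RING_DMA0;	/* single DMA engine */
	return RING_NONE;		/* pre-R600: the ioctl returns -EINVAL */
}

int main(void)
{
	printf("cayman/prio1 -> %d\n", pick_dma_ring(CHIP_CAYMAN, 1));
	printf("r600/prio0   -> %d\n", pick_dma_ring(CHIP_R600, 0));
	return 0;
}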
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 0fe56c9f64bd..ad6df625e8b8 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -66,24 +66,25 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
 	struct radeon_device *rdev = crtc->dev->dev_private;
 
 	if (ASIC_IS_DCE4(rdev)) {
-		WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
-		WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
-		       EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+		WREG32_IDX(EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset,
+			   EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+			   EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
 	} else if (ASIC_IS_AVIVO(rdev)) {
-		WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
-		WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+		WREG32_IDX(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset,
+			   (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
 	} else {
+		u32 reg;
 		switch (radeon_crtc->crtc_id) {
 		case 0:
-			WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
+			reg = RADEON_CRTC_GEN_CNTL;
 			break;
 		case 1:
-			WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
+			reg = RADEON_CRTC2_GEN_CNTL;
 			break;
 		default:
 			return;
 		}
-		WREG32_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN);
+		WREG32_IDX(reg, RREG32_IDX(reg) & ~RADEON_CRTC_CUR_EN);
 	}
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e2f5f888c374..49b06590001e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1059,6 +1059,7 @@ int radeon_device_init(struct radeon_device *rdev,
 
 	/* Registers mapping */
 	/* TODO: block userspace mapping of io register */
+	spin_lock_init(&rdev->mmio_idx_lock);
 	rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
 	rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
 	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
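
The WREG32_IDX/RREG32_IDX conversions in radeon_combios.c and radeon_cursor.c above rely on this new mmio_idx_lock: without serialization, a concurrent access could retarget RADEON_MM_INDEX between the index write and the data access. A sketch of what such an indexed accessor plausibly looks like; the lock and field names match the hunks, but the body is an assumption, not the exact kernel helper:

/* Sketch: indexed register read under mmio_idx_lock, so no other
 * thread can move RADEON_MM_INDEX between the select and the read. */
static u32 rreg32_idx_sketch(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
	writel(reg, rdev->rmmio + RADEON_MM_INDEX);	/* select register */
	val = readl(rdev->rmmio + RADEON_MM_DATA);	/* read it back */
	spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
	return val;
}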
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bfa2a6015727..310c0e5254ba 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -378,8 +378,12 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	work->old_rbo = rbo;
 	obj = new_radeon_fb->obj;
 	rbo = gem_to_radeon_bo(obj);
+
+	spin_lock(&rbo->tbo.bdev->fence_lock);
 	if (rbo->tbo.sync_obj)
 		work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
+	spin_unlock(&rbo->tbo.bdev->fence_lock);
+
 	INIT_WORK(&work->work, radeon_unpin_work_func);
 
 	/* We borrow the event spin lock for protecting unpin_work */
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 07eb84e8a8a4..9b1a727d3c9e 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -65,9 +65,12 @@
  *   2.22.0 - r600 only: RESOLVE_BOX allowed
  *   2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880
  *   2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures
+ *   2.25.0 - eg+: new info request for num SE and num SH
+ *   2.26.0 - r600-eg: fix htile size computation
+ *   2.27.0 - r600-SI: Add CS ioctl support for async DMA
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	24
+#define KMS_DRIVER_MINOR	27
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
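
Bumping KMS_DRIVER_MINOR to 27 is how userspace discovers the async DMA CS support, and user drivers normally gate new ioctl usage on it. A sketch of the usual libdrm version check, assuming an already-open radeon fd:

#include <xf86drm.h>

/* Returns nonzero when the kernel accepts RADEON_CS_RING_DMA. */
static int has_async_dma_cs(int fd)
{
	drmVersionPtr v = drmGetVersion(fd);
	int ok = 0;

	if (v) {
		ok = v->version_major == 2 && v->version_minor >= 27;
		drmFreeVersion(v);
	}
	return ok;
}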
@@ -281,12 +284,15 @@ static struct drm_driver driver_old = {
 
 static struct drm_driver kms_driver;
 
-static void radeon_kick_out_firmware_fb(struct pci_dev *pdev)
+static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
 {
 	struct apertures_struct *ap;
 	bool primary = false;
 
 	ap = alloc_apertures(1);
+	if (!ap)
+		return -ENOMEM;
+
 	ap->ranges[0].base = pci_resource_start(pdev, 0);
 	ap->ranges[0].size = pci_resource_len(pdev, 0);
 
@@ -295,13 +301,19 @@ static void radeon_kick_out_firmware_fb(struct pci_dev *pdev)
 #endif
 	remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
 	kfree(ap);
+
+	return 0;
 }
 
 static int __devinit
 radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+	int ret;
+
 	/* Get rid of things like offb */
-	radeon_kick_out_firmware_fb(pdev);
+	ret = radeon_kick_out_firmware_fb(pdev);
+	if (ret)
+		return ret;
 
 	return drm_get_pci_dev(pdev, ent, &kms_driver);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index a1b59ca96d01..e7fdf163a8ca 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -366,7 +366,6 @@ extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file
 extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
 extern void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc);
 extern void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base);
-extern u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr);
 
 extern void radeon_freelist_reset(struct drm_device * dev);
 extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 22bd6c2c2740..410a975a8eec 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -772,7 +772,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
 	int r;
 
 	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
-	if (rdev->wb.use_event) {
+	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
 		rdev->fence_drv[ring].scratch_reg = 0;
 		index = R600_WB_EVENT_OFFSET + ring * 4;
 	} else {
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 4debd60e5aa6..6e24f84755b5 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -1237,7 +1237,6 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
 {
 	struct radeon_bo_va *bo_va;
 
-	BUG_ON(!atomic_read(&bo->tbo.reserved));
 	list_for_each_entry(bo_va, &bo->va, bo_list) {
 		bo_va->valid = false;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dc781c49b96b..9c312f9afb68 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -361,6 +361,22 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 			return -EINVAL;
 		}
 		break;
+	case RADEON_INFO_MAX_SE:
+		if (rdev->family >= CHIP_TAHITI)
+			value = rdev->config.si.max_shader_engines;
+		else if (rdev->family >= CHIP_CAYMAN)
+			value = rdev->config.cayman.max_shader_engines;
+		else if (rdev->family >= CHIP_CEDAR)
+			value = rdev->config.evergreen.num_ses;
+		else
+			value = 1;
+		break;
+	case RADEON_INFO_MAX_SH_PER_SE:
+		if (rdev->family >= CHIP_TAHITI)
+			value = rdev->config.si.max_sh_per_se;
+		else
+			return -EINVAL;
+		break;
 	default:
 		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
 		return -EINVAL;
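
Both new requests travel through the existing DRM_RADEON_INFO ioctl, whose value field carries a user-space pointer that the kernel writes through. A sketch of the query from userspace, assuming libdrm's drmCommandWriteRead() and header locations:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <radeon_drm.h>

/* Ask the kernel for the number of shader engines. */
static int query_max_se(int fd, uint32_t *out)
{
	struct drm_radeon_info info;
	uint32_t value = 0;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_MAX_SE;
	info.value = (uintptr_t)&value;	/* kernel writes the result here */
	if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)))
		return -1;	/* e.g. -EINVAL for MAX_SH_PER_SE on pre-SI */
	*out = value;
	return 0;
}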
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 92c5f473cf08..d818b503b42f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -427,7 +427,7 @@ struct radeon_connector_atom_dig {
 	uint32_t igp_lane_info;
 	/* displayport */
 	struct radeon_i2c_chan *dp_i2c_bus;
-	u8 dpcd[8];
+	u8 dpcd[DP_RECEIVER_CAP_SIZE];
 	u8 dp_sink_type;
 	int dp_clock;
 	int dp_lane_count;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index b91118ccef86..883c95d8d90f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -84,17 +84,34 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 	rbo->placement.fpfn = 0;
 	rbo->placement.lpfn = 0;
 	rbo->placement.placement = rbo->placements;
-	rbo->placement.busy_placement = rbo->placements;
 	if (domain & RADEON_GEM_DOMAIN_VRAM)
 		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
 					TTM_PL_FLAG_VRAM;
-	if (domain & RADEON_GEM_DOMAIN_GTT)
-		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-	if (domain & RADEON_GEM_DOMAIN_CPU)
-		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	if (domain & RADEON_GEM_DOMAIN_GTT) {
+		if (rbo->rdev->flags & RADEON_IS_AGP) {
+			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+		} else {
+			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+		}
+	}
+	if (domain & RADEON_GEM_DOMAIN_CPU) {
+		if (rbo->rdev->flags & RADEON_IS_AGP) {
+			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
+		} else {
+			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
+		}
+	}
 	if (!c)
 		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
 	rbo->placement.num_placement = c;
+
+	c = 0;
+	rbo->placement.busy_placement = rbo->busy_placements;
+	if (rbo->rdev->flags & RADEON_IS_AGP) {
+		rbo->busy_placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+	} else {
+		rbo->busy_placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+	}
 	rbo->placement.num_busy_placement = c;
 }
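
The rewritten placement logic encodes a caching policy per bus type: an AGP aperture is mapped write-combined, so CPU-visible GTT and system pages must be WC, while a snooping PCIe GART can keep them cached. The single busy_placement entry is the GTT fallback used under memory pressure when the preferred domain cannot be satisfied. The caching choice reduces to a sketch like this (flag names are TTM's; the helper itself is illustrative):

/* Caching choice the hunk applies to both the GTT and CPU domains. */
static u32 gtt_caching_flags(bool is_agp)
{
	return is_agp ? (TTM_PL_FLAG_WC | TTM_PL_FLAG_TT)
		      : (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT);
}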
 
@@ -140,7 +157,7 @@ int radeon_bo_create(struct radeon_device *rdev,
 	/* Kernel allocation are uninterruptible */
 	down_read(&rdev->pm.mclk_lock);
 	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
-			&bo->placement, page_align, 0, !kernel, NULL,
+			&bo->placement, page_align, !kernel, NULL,
 			acc_size, sg, &radeon_ttm_bo_destroy);
 	up_read(&rdev->pm.mclk_lock);
 	if (unlikely(r != 0)) {
@@ -240,7 +257,7 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
 	}
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
 	if (likely(r == 0)) {
 		bo->pin_count = 1;
 		if (gpu_addr != NULL)
@@ -269,7 +286,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
 		return 0;
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
 	if (unlikely(r != 0))
 		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
 	return r;
@@ -340,7 +357,6 @@ int radeon_bo_list_validate(struct list_head *head)
 {
 	struct radeon_bo_list *lobj;
 	struct radeon_bo *bo;
-	u32 domain;
 	int r;
 
 	r = ttm_eu_reserve_buffers(head);
@@ -350,17 +366,9 @@ int radeon_bo_list_validate(struct list_head *head)
 	list_for_each_entry(lobj, head, tv.head) {
 		bo = lobj->bo;
 		if (!bo->pin_count) {
-			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
-			
-		retry:
-			radeon_ttm_placement_from_domain(bo, domain);
 			r = ttm_bo_validate(&bo->tbo, &bo->placement,
-						true, false, false);
+						true, false);
 			if (unlikely(r)) {
-				if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
-					domain |= RADEON_GEM_DOMAIN_GTT;
-					goto retry;
-				}
 				return r;
 			}
 		}
@@ -384,7 +392,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 	int steal;
 	int i;
 
-	BUG_ON(!atomic_read(&bo->tbo.reserved));
+	BUG_ON(!radeon_bo_is_reserved(bo));
 
 	if (!bo->tiling_flags)
 		return 0;
@@ -510,7 +518,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
 				uint32_t *tiling_flags,
 				uint32_t *pitch)
 {
-	BUG_ON(!atomic_read(&bo->tbo.reserved));
+	BUG_ON(!radeon_bo_is_reserved(bo));
 	if (tiling_flags)
 		*tiling_flags = bo->tiling_flags;
 	if (pitch)
@@ -520,7 +528,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
 int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 				bool force_drop)
 {
-	BUG_ON(!atomic_read(&bo->tbo.reserved));
+	BUG_ON(!radeon_bo_is_reserved(bo) && !force_drop);
 
 	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
 		return 0;
@@ -575,7 +583,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 			/* hurrah the memory is not visible ! */
 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
 			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
-			r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+			r = ttm_bo_validate(bo, &rbo->placement, false, false);
 			if (unlikely(r != 0))
 				return r;
 			offset = bo->mem.start << PAGE_SHIFT;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 93cd491fff2e..5fc86b03043b 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -80,7 +80,7 @@ static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
 
 static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
 {
-	return !!atomic_read(&bo->tbo.reserved);
+	return ttm_bo_is_reserved(&bo->tbo);
 }
 
 static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 587c09a00ba2..fda09c9ea689 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -26,16 +26,31 @@
 #include "radeon_reg.h"
 #include "radeon.h"
 
+#define RADEON_TEST_COPY_BLIT 1
+#define RADEON_TEST_COPY_DMA  0
+
 
 /* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
-void radeon_test_moves(struct radeon_device *rdev)
+static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
 {
 	struct radeon_bo *vram_obj = NULL;
 	struct radeon_bo **gtt_obj = NULL;
 	struct radeon_fence *fence = NULL;
 	uint64_t gtt_addr, vram_addr;
 	unsigned i, n, size;
-	int r;
+	int r, ring;
+
+	switch (flag) {
+	case RADEON_TEST_COPY_DMA:
+		ring = radeon_copy_dma_ring_index(rdev);
+		break;
+	case RADEON_TEST_COPY_BLIT:
+		ring = radeon_copy_blit_ring_index(rdev);
+		break;
+	default:
+		DRM_ERROR("Unknown copy method\n");
+		return;
+	}
 
 	size = 1024 * 1024;
 
@@ -106,7 +121,10 @@ void radeon_test_moves(struct radeon_device *rdev)
 
 		radeon_bo_kunmap(gtt_obj[i]);
 
-		r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+		if (ring == R600_RING_TYPE_DMA_INDEX)
+			r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+		else
+			r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
 		if (r) {
 			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
 			goto out_cleanup;
@@ -149,7 +167,10 @@ void radeon_test_moves(struct radeon_device *rdev)
 
 		radeon_bo_kunmap(vram_obj);
 
-		r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+		if (ring == R600_RING_TYPE_DMA_INDEX)
+			r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+		else
+			r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
 		if (r) {
 			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
 			goto out_cleanup;
@@ -223,6 +244,14 @@ out_cleanup:
 	}
 }
 
+void radeon_test_moves(struct radeon_device *rdev)
+{
+	if (rdev->asic->copy.dma)
+		radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
+	if (rdev->asic->copy.blit)
+		radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
+}
+
 void radeon_test_ring_sync(struct radeon_device *rdev,
 			   struct radeon_ring *ringA,
 			   struct radeon_ring *ringB)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 5ebe1b3e5db2..1d8ff2f850ba 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -216,7 +216,7 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
 }
 
 static int radeon_move_blit(struct ttm_buffer_object *bo,
-			bool evict, int no_wait_reserve, bool no_wait_gpu,
+			bool evict, bool no_wait_gpu,
 			struct ttm_mem_reg *new_mem,
 			struct ttm_mem_reg *old_mem)
 {
@@ -265,15 +265,15 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
 			&fence);
 	/* FIXME: handle copy error */
-	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
-				      evict, no_wait_reserve, no_wait_gpu, new_mem);
+	r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
+				      evict, no_wait_gpu, new_mem);
 	radeon_fence_unref(&fence);
 	return r;
 }
 
 static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 				bool evict, bool interruptible,
-				bool no_wait_reserve, bool no_wait_gpu,
+				bool no_wait_gpu,
 				struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
@@ -294,7 +294,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	placement.busy_placement = &placements;
 	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-			     interruptible, no_wait_reserve, no_wait_gpu);
+			     interruptible, no_wait_gpu);
 	if (unlikely(r)) {
 		return r;
 	}
@@ -308,11 +308,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
+	r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
+	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
 out_cleanup:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return r;
@@ -320,7 +320,7 @@ out_cleanup:
 
 static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 				bool evict, bool interruptible,
-				bool no_wait_reserve, bool no_wait_gpu,
+				bool no_wait_gpu,
 				struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
@@ -340,15 +340,16 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 	placement.num_busy_placement = 1;
 	placement.busy_placement = &placements;
 	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
+			     interruptible, no_wait_gpu);
 	if (unlikely(r)) {
 		return r;
 	}
-	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
+	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
+	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -359,7 +360,7 @@ out_cleanup:
 
 static int radeon_bo_move(struct ttm_buffer_object *bo,
 			bool evict, bool interruptible,
-			bool no_wait_reserve, bool no_wait_gpu,
+			bool no_wait_gpu,
 			struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
@@ -388,18 +389,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
 	if (old_mem->mem_type == TTM_PL_VRAM &&
 	    new_mem->mem_type == TTM_PL_SYSTEM) {
 		r = radeon_move_vram_ram(bo, evict, interruptible,
-					no_wait_reserve, no_wait_gpu, new_mem);
+					no_wait_gpu, new_mem);
 	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
 		   new_mem->mem_type == TTM_PL_VRAM) {
 		r = radeon_move_ram_vram(bo, evict, interruptible,
-					    no_wait_reserve, no_wait_gpu, new_mem);
+					    no_wait_gpu, new_mem);
 	} else {
-		r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
+		r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
 	}
 
 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+		r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 	}
 	return r;
 }
@@ -471,13 +472,12 @@ static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
 {
 }
 
-static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
-				bool lazy, bool interruptible)
+static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
 {
 	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
 }
 
-static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
+static int radeon_sync_obj_flush(void *sync_obj)
 {
 	return 0;
 }
@@ -492,7 +492,7 @@ static void *radeon_sync_obj_ref(void *sync_obj)
 	return radeon_fence_ref((struct radeon_fence *)sync_obj);
 }
 
-static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
+static bool radeon_sync_obj_signaled(void *sync_obj)
 {
 	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
 }
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 785d09590b24..2bb6d0e84b3d 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -40,6 +40,12 @@ static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
 static void rv515_gpu_init(struct radeon_device *rdev);
 int rv515_mc_wait_for_idle(struct radeon_device *rdev);
 
+static const u32 crtc_offsets[2] =
+{
+	0,
+	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
 void rv515_debugfs(struct radeon_device *rdev)
 {
 	if (r100_debugfs_rbbm_init(rdev)) {
@@ -281,30 +287,114 @@ static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
 
 void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
 {
+	u32 crtc_enabled, tmp, frame_count, blackout;
+	int i, j;
+
 	save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
 	save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
 
-	/* Stop all video */
-	WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
+	/* disable VGA render */
 	WREG32(R_000300_VGA_RENDER_CONTROL, 0);
-	WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
-	WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
-	WREG32(R_006080_D1CRTC_CONTROL, 0);
-	WREG32(R_006880_D2CRTC_CONTROL, 0);
-	WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
-	WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
-	WREG32(R_000330_D1VGA_CONTROL, 0);
-	WREG32(R_000338_D2VGA_CONTROL, 0);
+	/* blank the display controllers */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		crtc_enabled = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN;
+		if (crtc_enabled) {
+			save->crtc_enabled[i] = true;
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+			if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
+				radeon_wait_for_vblank(rdev, i);
+				tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+				WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+			}
+			/* wait for the next frame */
+			frame_count = radeon_get_vblank_counter(rdev, i);
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				if (radeon_get_vblank_counter(rdev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+		} else {
+			save->crtc_enabled[i] = false;
+		}
+	}
+
+	radeon_mc_wait_for_idle(rdev);
+
+	if (rdev->family >= CHIP_R600) {
+		if (rdev->family >= CHIP_RV770)
+			blackout = RREG32(R700_MC_CITF_CNTL);
+		else
+			blackout = RREG32(R600_CITF_CNTL);
+		if ((blackout & R600_BLACKOUT_MASK) != R600_BLACKOUT_MASK) {
+			/* Block CPU access */
+			WREG32(R600_BIF_FB_EN, 0);
+			/* blackout the MC */
+			blackout |= R600_BLACKOUT_MASK;
+			if (rdev->family >= CHIP_RV770)
+				WREG32(R700_MC_CITF_CNTL, blackout);
+			else
+				WREG32(R600_CITF_CNTL, blackout);
+		}
+	}
 }
 
 void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
 {
-	WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
-	WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
-	WREG32(R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
-	WREG32(R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
-	WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
-	/* Unlock host access */
+	u32 tmp, frame_count;
+	int i, j;
+
+	/* update crtc base addresses */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (rdev->family >= CHIP_RV770) {
+			if (i == 1) {
+				WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(rdev->mc.vram_start));
+				WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(rdev->mc.vram_start));
+			} else {
+				WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(rdev->mc.vram_start));
+				WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(rdev->mc.vram_start));
+			}
+		}
+		WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)rdev->mc.vram_start);
+		WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)rdev->mc.vram_start);
+	}
+	WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+
+	if (rdev->family >= CHIP_R600) {
+		/* unblackout the MC */
+		if (rdev->family >= CHIP_RV770)
+			tmp = RREG32(R700_MC_CITF_CNTL);
+		else
+			tmp = RREG32(R600_CITF_CNTL);
+		tmp &= ~R600_BLACKOUT_MASK;
+		if (rdev->family >= CHIP_RV770)
+			WREG32(R700_MC_CITF_CNTL, tmp);
+		else
+			WREG32(R600_CITF_CNTL, tmp);
+		/* allow CPU access */
+		WREG32(R600_BIF_FB_EN, R600_FB_READ_EN | R600_FB_WRITE_EN);
+	}
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+			/* wait for the next frame */
+			frame_count = radeon_get_vblank_counter(rdev, i);
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				if (radeon_get_vblank_counter(rdev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+		}
+	}
+	/* Unlock VGA access */
 	WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
 	mdelay(1);
 	WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
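
Taken together, the reworked pair brackets memory-controller reprogramming: rv515_mc_stop() blanks each active CRTC, blacks out the MC on r6xx/r7xx, and blocks CPU framebuffer access, while rv515_mc_resume() restores scanout addresses, lifts the blackout, and unblanks, waiting for a new frame on each transition. The intended call pattern, sketched (the caller and the reprogram step are assumptions):

struct rv515_mc_save save;

rv515_mc_stop(rdev, &save);	/* blank CRTCs, blackout MC, block CPU FB access */
/* ... safe to reprogram MC base/top registers here ... */
rv515_mc_resume(rdev, &save);	/* restore surfaces, unblackout, unblank */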
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 79814a08c8e5..87c979c4f721 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -316,6 +316,7 @@ void r700_cp_stop(struct radeon_device *rdev)
 	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 	WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
 	WREG32(SCRATCH_UMSK, 0);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 }
 
 static int rv770_cp_load_microcode(struct radeon_device *rdev)
@@ -583,6 +584,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 	WREG32(GB_TILING_CONFIG, gb_tiling_config);
 	WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
 	WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+	WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
+	WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
 
 	WREG32(CGTS_SYS_TCC_DISABLE, 0);
 	WREG32(CGTS_TCC_DISABLE, 0);
@@ -886,7 +889,7 @@ static int rv770_mc_init(struct radeon_device *rdev)
 
 static int rv770_startup(struct radeon_device *rdev)
 {
-	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring;
 	int r;
 
 	/* enable pcie gen2 link */
@@ -932,6 +935,12 @@ static int rv770_startup(struct radeon_device *rdev)
 		return r;
 	}
 
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
 	/* Enable IRQ */
 	r = r600_irq_init(rdev);
 	if (r) {
@@ -941,11 +950,20 @@ static int rv770_startup(struct radeon_device *rdev)
 	}
 	r600_irq_set(rdev);
 
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
 			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
 			     0, 0xfffff, RADEON_CP_PACKET2);
 	if (r)
 		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_RB_RPTR, DMA_RB_WPTR,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
+
 	r = rv770_cp_load_microcode(rdev);
 	if (r)
 		return r;
@@ -953,6 +971,10 @@ static int rv770_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = r600_dma_resume(rdev);
+	if (r)
+		return r;
+
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -995,7 +1017,7 @@ int rv770_suspend(struct radeon_device *rdev)
 {
 	r600_audio_fini(rdev);
 	r700_cp_stop(rdev);
-	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+	r600_dma_stop(rdev);
 	r600_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	rv770_pcie_gart_disable(rdev);
@@ -1066,6 +1088,9 @@ int rv770_init(struct radeon_device *rdev)
 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -1078,6 +1103,7 @@ int rv770_init(struct radeon_device *rdev)
 	if (r) {
 		dev_err(rdev->dev, "disabling GPU acceleration\n");
 		r700_cp_fini(rdev);
+		r600_dma_fini(rdev);
 		r600_irq_fini(rdev);
 		radeon_wb_fini(rdev);
 		radeon_ib_pool_fini(rdev);
@@ -1093,6 +1119,7 @@ void rv770_fini(struct radeon_device *rdev)
 {
 	r600_blit_fini(rdev);
 	r700_cp_fini(rdev);
+	r600_dma_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_wb_fini(rdev);
 	radeon_ib_pool_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index b0adfc595d75..20e29d23d348 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -109,6 +109,9 @@
 #define     PIPE_TILING__SHIFT              1
 #define     PIPE_TILING__MASK               0x0000000e
 
+#define DMA_TILING_CONFIG                               0x3ec8
+#define DMA_TILING_CONFIG2                              0xd0b8
+
 #define	GC_USER_SHADER_PIPE_CONFIG			0x8954
 #define		INACTIVE_QD_PIPES(x)				((x) << 8)
 #define		INACTIVE_QD_PIPES_MASK				0x0000FF00
@@ -358,6 +361,26 @@
 
 #define	WAIT_UNTIL					0x8040
 
+/* async DMA */
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_CONSTANT_FILL			  0xd
+#define	DMA_PACKET_NOP					  0xf
+
+
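
The r7xx DMA header packs a 4-bit command, one tiling bit, one semaphore bit, and a 16-bit dword count. A runnable check of the encoding, reusing the macro exactly as defined above; the NOP value is the same one rv770_startup() hands to radeon_ring_init() as ring padding:

#include <stdio.h>
#include <stdint.h>

#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
					 (((t) & 0x1) << 23) |		\
					 (((s) & 0x1) << 22) |		\
					 (((n) & 0xFFFF) << 0))

int main(void)
{
	/* DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0) -> 0xf0000000 */
	printf("NOP  = 0x%08x\n", (uint32_t)DMA_PACKET(0xf, 0, 0, 0));
	/* a linear copy of 16 dwords -> 0x30000010 */
	printf("COPY = 0x%08x\n", (uint32_t)DMA_PACKET(0x3, 0, 0, 16));
	return 0;
}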
 #define	SRBM_STATUS				        0x0E50
 
 /* DCE 3.2 HDMI */
@@ -551,6 +574,54 @@
 #define HDMI_OFFSET0                      (0x7400 - 0x7400)
 #define HDMI_OFFSET1                      (0x7800 - 0x7400)
 
+/* DCE3.2 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0        0x71c8 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1        0x71cc /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2        0x71d0 /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3        0x71d4 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4        0x71d8 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5        0x71dc /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6        0x71e0 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7        0x71e4 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8        0x71e8 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9        0x71ec /* Dolby Digital Plus */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10       0x71f0 /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11       0x71f4 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12       0x71f8 /* DST */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13       0x71fc /* WMA Pro */
+#       define MAX_CHANNELS(x)                            (((x) & 0x7) << 0)
+/* max channels minus one.  7 = 8 channels */
+#       define SUPPORTED_FREQUENCIES(x)                   (((x) & 0xff) << 8)
+#       define DESCRIPTOR_BYTE_2(x)                       (((x) & 0xff) << 16)
+#       define SUPPORTED_FREQUENCIES_STEREO(x)            (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
+#define AZ_HOT_PLUG_CONTROL                               0x7300
+#       define AZ_FORCE_CODEC_WAKE                        (1 << 0)
+#       define PIN0_JACK_DETECTION_ENABLE                 (1 << 4)
+#       define PIN1_JACK_DETECTION_ENABLE                 (1 << 5)
+#       define PIN2_JACK_DETECTION_ENABLE                 (1 << 6)
+#       define PIN3_JACK_DETECTION_ENABLE                 (1 << 7)
+#       define PIN0_UNSOLICITED_RESPONSE_ENABLE           (1 << 8)
+#       define PIN1_UNSOLICITED_RESPONSE_ENABLE           (1 << 9)
+#       define PIN2_UNSOLICITED_RESPONSE_ENABLE           (1 << 10)
+#       define PIN3_UNSOLICITED_RESPONSE_ENABLE           (1 << 11)
+#       define CODEC_HOT_PLUG_ENABLE                      (1 << 12)
+#       define PIN0_AUDIO_ENABLED                         (1 << 24)
+#       define PIN1_AUDIO_ENABLED                         (1 << 25)
+#       define PIN2_AUDIO_ENABLED                         (1 << 26)
+#       define PIN3_AUDIO_ENABLED                         (1 << 27)
+#       define AUDIO_ENABLED                              (1 << 31)
+
+
 #define D1GRPH_PRIMARY_SURFACE_ADDRESS                    0x6110
 #define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH               0x6914
 #define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH               0x6114
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 010156dd949f..ef683653f0b7 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1660,6 +1660,8 @@ static void si_gpu_init(struct radeon_device *rdev)
 	WREG32(GB_ADDR_CONFIG, gb_addr_config);
 	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
 
 	si_tiling_mode_table_init(rdev);
 
@@ -1836,6 +1838,9 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)
 		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
 		WREG32(SCRATCH_UMSK, 0);
+		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
 	}
 	udelay(50);
 }
@@ -2426,9 +2431,20 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
 	/* enable context1-15 */
 	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
 	       (u32)(rdev->dummy_page.addr >> 12));
-	WREG32(VM_CONTEXT1_CNTL2, 0);
+	WREG32(VM_CONTEXT1_CNTL2, 4);
 	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
-				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
 	si_pcie_gart_tlb_flush(rdev);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -2534,6 +2550,7 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
 	u32 idx = pkt->idx + 1;
 	u32 idx_value = ib[idx];
 	u32 start_reg, end_reg, reg, i;
+	u32 command, info;
 
 	switch (pkt->opcode) {
 	case PACKET3_NOP:
@@ -2633,6 +2650,52 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
 				return -EINVAL;
 		}
 		break;
+	case PACKET3_CP_DMA:
+		command = ib[idx + 4];
+		info = ib[idx + 1];
+		if (command & PACKET3_CP_DMA_CMD_SAS) {
+			/* src address space is register */
+			if (((info & 0x60000000) >> 29) == 0) {
+				start_reg = idx_value << 2;
+				if (command & PACKET3_CP_DMA_CMD_SAIC) {
+					reg = start_reg;
+					if (!si_vm_reg_valid(reg)) {
+						DRM_ERROR("CP DMA Bad SRC register\n");
+						return -EINVAL;
+					}
+				} else {
+					for (i = 0; i < (command & 0x1fffff); i++) {
+						reg = start_reg + (4 * i);
+						if (!si_vm_reg_valid(reg)) {
+							DRM_ERROR("CP DMA Bad SRC register\n");
+							return -EINVAL;
+						}
+					}
+				}
+			}
+		}
+		if (command & PACKET3_CP_DMA_CMD_DAS) {
+			/* dst address space is register */
+			if (((info & 0x00300000) >> 20) == 0) {
+				start_reg = ib[idx + 2];
+				if (command & PACKET3_CP_DMA_CMD_DAIC) {
+					reg = start_reg;
+					if (!si_vm_reg_valid(reg)) {
+						DRM_ERROR("CP DMA Bad DST register\n");
+						return -EINVAL;
+					}
+				} else {
+					for (i = 0; i < (command & 0x1fffff); i++) {
+						reg = start_reg + (4 * i);
+						if (!si_vm_reg_valid(reg)) {
+							DRM_ERROR("CP DMA Bad DST register\n");
+							return -EINVAL;
+						}
+					}
+				}
+			}
+		}
+		break;
 	default:
 		DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
 		return -EINVAL;
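
The CP_DMA checks above read a handful of header fields whose positions follow from the masks: info bits 29-30 select the source address space and bits 20-21 the destination (zero meaning a register), while the low 21 bits of the command dword carry the count used to walk register ranges. A runnable decode of those fields:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t info = 0, command = 64;	/* both spaces "register", 64 regs */

	printf("src space = %u\n", (info & 0x60000000) >> 29);
	printf("dst space = %u\n", (info & 0x00300000) >> 20);
	printf("count     = %u\n", command & 0x1fffff);
	return 0;
}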
@@ -2809,30 +2872,86 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
 {
 	struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
 	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+	uint64_t value;
+	unsigned ndw;
 
-	while (count) {
-		unsigned ndw = 2 + count * 2;
-		if (ndw > 0x3FFE)
-			ndw = 0x3FFE;
+	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+		while (count) {
+			ndw = 2 + count * 2;
+			if (ndw > 0x3FFE)
+				ndw = 0x3FFE;
 
-		radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
-		radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-					 WRITE_DATA_DST_SEL(1)));
-		radeon_ring_write(ring, pe);
-		radeon_ring_write(ring, upper_32_bits(pe));
-		for (; ndw > 2; ndw -= 2, --count, pe += 8) {
-			uint64_t value;
-			if (flags & RADEON_VM_PAGE_SYSTEM) {
-				value = radeon_vm_map_gart(rdev, addr);
-				value &= 0xFFFFFFFFFFFFF000ULL;
-			} else if (flags & RADEON_VM_PAGE_VALID)
-				value = addr;
-			else
-				value = 0;
-			addr += incr;
-			value |= r600_flags;
-			radeon_ring_write(ring, value);
-			radeon_ring_write(ring, upper_32_bits(value));
+			radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
+			radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+						 WRITE_DATA_DST_SEL(1)));
+			radeon_ring_write(ring, pe);
+			radeon_ring_write(ring, upper_32_bits(pe));
+			for (; ndw > 2; ndw -= 2, --count, pe += 8) {
+				if (flags & RADEON_VM_PAGE_SYSTEM) {
+					value = radeon_vm_map_gart(rdev, addr);
+					value &= 0xFFFFFFFFFFFFF000ULL;
+				} else if (flags & RADEON_VM_PAGE_VALID) {
+					value = addr;
+				} else {
+					value = 0;
+				}
+				addr += incr;
+				value |= r600_flags;
+				radeon_ring_write(ring, value);
+				radeon_ring_write(ring, upper_32_bits(value));
+			}
+		}
+	} else {
+		/* DMA */
+		if (flags & RADEON_VM_PAGE_SYSTEM) {
+			while (count) {
+				ndw = count * 2;
+				if (ndw > 0xFFFFE)
+					ndw = 0xFFFFE;
+
+				/* for non-physically contiguous pages (system) */
+				radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw));
+				radeon_ring_write(ring, pe);
+				radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+				for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+					/* the enclosing branch guarantees
+					 * system pages here
+					 */
+					value = radeon_vm_map_gart(rdev, addr);
+					value &= 0xFFFFFFFFFFFFF000ULL;
+					addr += incr;
+					value |= r600_flags;
+					radeon_ring_write(ring, value);
+					radeon_ring_write(ring, upper_32_bits(value));
+				}
+			}
+		} else {
+			while (count) {
+				ndw = count * 2;
+				if (ndw > 0xFFFFE)
+					ndw = 0xFFFFE;
+
+				if (flags & RADEON_VM_PAGE_VALID)
+					value = addr;
+				else
+					value = 0;
+				/* for physically contiguous pages (vram) */
+				radeon_ring_write(ring, DMA_PTE_PDE_PACKET(ndw));
+				radeon_ring_write(ring, pe); /* dst addr */
+				radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+				radeon_ring_write(ring, r600_flags); /* mask */
+				radeon_ring_write(ring, 0); /* upper 32 bits of mask */
+				radeon_ring_write(ring, value); /* value */
+				radeon_ring_write(ring, upper_32_bits(value));
+				radeon_ring_write(ring, incr); /* increment size */
+				radeon_ring_write(ring, 0); /* upper 32 bits of increment */
+				pe += ndw * 4;
+				addr += (ndw / 2) * incr;
+				count -= ndw / 2;
+			}
 		}
 	}
 }
@@ -2880,6 +2999,32 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 	radeon_ring_write(ring, 0x0);
 }
 
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+	struct radeon_ring *ring = &rdev->ring[ridx];
+
+	if (vm == NULL)
+		return;
+
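+	/* point this VM context at its page directory via an SRBM write */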
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	if (vm->id < 8) {
+		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+	} else {
+		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
+	}
+	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+	/* flush hdp cache */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+	radeon_ring_write(ring, 1);
+
+	/* bits 0-7 are the VM contexts 0-7 */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+	radeon_ring_write(ring, 1 << vm->id);
+}
+
 /*
  * RLC
  */
@@ -3048,6 +3193,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
 	WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
 	WREG32(CP_INT_CNTL_RING1, 0);
 	WREG32(CP_INT_CNTL_RING2, 0);
+	tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+	WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
+	tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
 	WREG32(GRBM_INT_CNTL, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -3167,6 +3316,7 @@ int si_irq_set(struct radeon_device *rdev)
 	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
 	u32 grbm_int_cntl = 0;
 	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+	u32 dma_cntl, dma_cntl1;
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3187,6 +3337,9 @@ int si_irq_set(struct radeon_device *rdev)
 	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
 	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
 
+	dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+	dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+
 	/* enable CP interrupts on all rings */
 	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
 		DRM_DEBUG("si_irq_set: sw int gfx\n");
@@ -3200,6 +3353,15 @@ int si_irq_set(struct radeon_device *rdev)
 		DRM_DEBUG("si_irq_set: sw int cp2\n");
 		cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
 	}
+	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+		DRM_DEBUG("si_irq_set: sw int dma\n");
+		dma_cntl |= TRAP_ENABLE;
+	}
+
+	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+		DRM_DEBUG("si_irq_set: sw int dma1\n");
+		dma_cntl1 |= TRAP_ENABLE;
+	}
 	if (rdev->irq.crtc_vblank_int[0] ||
 	    atomic_read(&rdev->irq.pflip[0])) {
 		DRM_DEBUG("si_irq_set: vblank 0\n");
@@ -3259,6 +3421,9 @@ int si_irq_set(struct radeon_device *rdev)
 	WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
 	WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
 
+	WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
+	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);
+
 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3684,6 +3849,16 @@ restart_ih:
 				break;
 			}
 			break;
+		case 146: /* VM protection faults */
+		case 147:
+			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+				RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+				RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+			/* reset addr and status */
+			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+			break;
 		case 176: /* RINGID0 CP_INT */
 			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 			break;
@@ -3707,9 +3882,17 @@ restart_ih:
 				break;
 			}
 			break;
+		case 224: /* DMA trap event */
+			DRM_DEBUG("IH: DMA trap\n");
+			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+			break;
 		case 233: /* GUI IDLE */
 			DRM_DEBUG("IH: GUI idle\n");
 			break;
+		case 244: /* DMA1 trap event */
+			DRM_DEBUG("IH: DMA1 trap\n");
+			radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 			break;
@@ -3733,6 +3916,80 @@ restart_ih:
 	return IRQ_HANDLED;
 }
 
+/**
+ * si_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (SI).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int si_copy_dma(struct radeon_device *rdev,
+		uint64_t src_offset, uint64_t dst_offset,
+		unsigned num_gpu_pages,
+		struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_bytes, cur_size_in_bytes;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
+	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+	num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
+	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
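+	/* each DMA COPY packet can transfer at most 0xfffff bytes */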
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_bytes = size_in_bytes;
+		if (cur_size_in_bytes > 0xFFFFF)
+			cur_size_in_bytes = 0xFFFFF;
+		size_in_bytes -= cur_size_in_bytes;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
+		radeon_ring_write(ring, dst_offset & 0xffffffff);
+		radeon_ring_write(ring, src_offset & 0xffffffff);
+		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		src_offset += cur_size_in_bytes;
+		dst_offset += cur_size_in_bytes;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
+
 /*
  * startup/shutdown callbacks
  */
@@ -3804,6 +4061,18 @@ static int si_startup(struct radeon_device *rdev)
 		return r;
 	}
 
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
 	/* Enable IRQ */
 	r = si_irq_init(rdev);
 	if (r) {
@@ -3834,6 +4103,22 @@ static int si_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+			     DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+	if (r)
+		return r;
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+			     DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+			     DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+	if (r)
+		return r;
+
 	r = si_cp_load_microcode(rdev);
 	if (r)
 		return r;
@@ -3841,6 +4126,10 @@ static int si_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = cayman_dma_resume(rdev);
+	if (r)
+		return r;
+
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -3882,9 +4171,7 @@ int si_resume(struct radeon_device *rdev)
 int si_suspend(struct radeon_device *rdev)
 {
 	si_cp_enable(rdev, false);
-	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
-	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
-	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+	cayman_dma_stop(rdev);
 	si_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	si_pcie_gart_disable(rdev);
@@ -3962,6 +4249,14 @@ int si_init(struct radeon_device *rdev)
 	ring->ring_obj = NULL;
 	r600_ring_init(rdev, ring, 1024 * 1024);
 
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 64 * 1024);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 64 * 1024);
+
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -3974,6 +4269,7 @@ int si_init(struct radeon_device *rdev)
 	if (r) {
 		dev_err(rdev->dev, "disabling GPU acceleration\n");
 		si_cp_fini(rdev);
+		cayman_dma_fini(rdev);
 		si_irq_fini(rdev);
 		si_rlc_fini(rdev);
 		radeon_wb_fini(rdev);
@@ -4002,6 +4298,7 @@ void si_fini(struct radeon_device *rdev)
 	r600_blit_fini(rdev);
 #endif
 	si_cp_fini(rdev);
+	cayman_dma_fini(rdev);
 	si_irq_fini(rdev);
 	si_rlc_fini(rdev);
 	radeon_wb_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index a8871afc5b4e..62b46215d423 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -91,7 +91,18 @@
 #define VM_CONTEXT0_CNTL				0x1410
 #define		ENABLE_CONTEXT					(1 << 0)
 #define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 3)
 #define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT	(1 << 6)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT	(1 << 7)
+#define		PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 9)
+#define		PDE0_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 10)
+#define		VALID_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 12)
+#define		VALID_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 13)
+#define		READ_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 15)
+#define		READ_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 16)
+#define		WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 18)
+#define		WRITE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 19)
 #define VM_CONTEXT1_CNTL				0x1414
 #define VM_CONTEXT0_CNTL2				0x1430
 #define VM_CONTEXT1_CNTL2				0x1434
@@ -104,6 +115,9 @@
 #define	VM_CONTEXT14_PAGE_TABLE_BASE_ADDR		0x1450
 #define	VM_CONTEXT15_PAGE_TABLE_BASE_ADDR		0x1454
 
+#define	VM_CONTEXT1_PROTECTION_FAULT_ADDR		0x14FC
+#define	VM_CONTEXT1_PROTECTION_FAULT_STATUS		0x14DC
+
 #define VM_INVALIDATE_REQUEST				0x1478
 #define VM_INVALIDATE_RESPONSE				0x147c
 
@@ -835,6 +849,54 @@
 #define	PACKET3_WAIT_REG_MEM				0x3C
 #define	PACKET3_MEM_WRITE				0x3D
 #define	PACKET3_COPY_DATA				0x40
+#define	PACKET3_CP_DMA					0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ *    SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
+                /* 0 - DST_ADDR
+		 * 1 - GDS
+		 */
+#              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
+                /* 0 - ME
+		 * 1 - PFP
+		 */
+#              define PACKET3_CP_DMA_SRC_SEL(x)    ((x) << 29)
+                /* 0 - SRC_ADDR
+		 * 1 - GDS
+		 * 2 - DATA
+		 */
+#              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
+#              define PACKET3_CP_DMA_CMD_RAW_WAIT  (1 << 30)
 #define	PACKET3_PFP_SYNC_ME				0x42
 #define	PACKET3_SURFACE_SYNC				0x43
 #              define PACKET3_DEST_BASE_0_ENA      (1 << 0)
@@ -922,4 +984,61 @@
 #define	PACKET3_WAIT_ON_AVAIL_BUFFER			0x8A
 #define	PACKET3_SWITCH_BUFFER				0x8B
 
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET                              0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET                              0x800 /* not a register */
+
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_RB_BASE                                       0xd004
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI                               0xd01c
+#define DMA_RB_RPTR_ADDR_LO                               0xd020
+
+#define DMA_IB_CNTL                                       0xd024
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#define DMA_IB_RPTR                                       0xd028
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_TILING_CONFIG                                 0xd0b8
+
+#define DMA_PACKET(cmd, b, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((b) & 0x1) << 26) |		\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n)	((((cmd) & 0xF) << 28) |	\
+					 (((vmid) & 0xF) << 20) |	\
+					 (((n) & 0xFFFFF) << 0))
+
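+/* a WRITE (0x2) packet variant used by si_vm_set_page() to generate a run
+ * of PTEs/PDEs from a base value, mask and per-entry increment
+ */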
+#define DMA_PTE_PDE_PACKET(n)		((2 << 28) |			\
+					 (1 << 26) |			\
+					 (1 << 21) |			\
+					 (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_SRBM_WRITE				  0x9
+#define	DMA_PACKET_CONSTANT_FILL			  0xd
+#define	DMA_PACKET_NOP					  0xf
+
 #endif
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 0e7a9306bd0c..d917a411ca85 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -748,7 +748,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
 	connector->encoder = encoder;
 
 	drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-	drm_connector_property_set_value(connector,
+	drm_object_property_set_value(&connector->base,
 		sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
 
 	return 0;
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
new file mode 100644
index 000000000000..be1daf7344d3
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -0,0 +1,23 @@
+config DRM_TEGRA
+	tristate "NVIDIA Tegra DRM"
+	depends on DRM && OF && ARCH_TEGRA
+	select DRM_KMS_HELPER
+	select DRM_GEM_CMA_HELPER
+	select DRM_KMS_CMA_HELPER
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	help
+	  Choose this option if you have an NVIDIA Tegra SoC.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called tegra-drm.
+
+if DRM_TEGRA
+
+config DRM_TEGRA_DEBUG
+	bool "NVIDIA Tegra DRM debug support"
+	help
+	  Say yes here to enable debugging support.
+
+endif
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
new file mode 100644
index 000000000000..80f73d1315d0
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -0,0 +1,7 @@
+ccflags-y := -Iinclude/drm
+ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
+
+tegra-drm-y := drm.o fb.o dc.o host1x.o
+tegra-drm-y += output.o rgb.o hdmi.o
+
+obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
new file mode 100644
index 000000000000..074410371e2a
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -0,0 +1,834 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <mach/clk.h>
+
+#include "drm.h"
+#include "dc.h"
+
+struct tegra_dc_window {
+	fixed20_12 x;
+	fixed20_12 y;
+	fixed20_12 w;
+	fixed20_12 h;
+	unsigned int outx;
+	unsigned int outy;
+	unsigned int outw;
+	unsigned int outh;
+	unsigned int stride;
+	unsigned int fmt;
+};
+
+static const struct drm_crtc_funcs tegra_crtc_funcs = {
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = drm_crtc_cleanup,
+};
+
+static void tegra_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted)
+{
+	return true;
+}
+
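+/*
+ * Compute the scaling DDA increment in 4.12 fixed point.  The step is
+ * clamped to the hardware maximum: 15 lines vertically and, horizontally,
+ * 4 or 8 pixels depending on the bytes per pixel.
+ */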
+static inline u32 compute_dda_inc(fixed20_12 inf, unsigned int out, bool v,
+				  unsigned int bpp)
+{
+	fixed20_12 outf = dfixed_init(out);
+	u32 dda_inc;
+	int max;
+
+	if (v) {
+		max = 15;
+	} else {
+		switch (bpp) {
+		case 2:
+			max = 8;
+			break;
+
+		default:
+			WARN_ON_ONCE(1);
+			/* fallthrough */
+		case 4:
+			max = 4;
+			break;
+		}
+	}
+
+	outf.full = max_t(u32, outf.full - dfixed_const(1), dfixed_const(1));
+	inf.full -= dfixed_const(1);
+
+	dda_inc = dfixed_div(inf, outf);
+	dda_inc = min_t(u32, dda_inc, dfixed_const(max));
+
+	return dda_inc;
+}
+
+static inline u32 compute_initial_dda(fixed20_12 in)
+{
+	return dfixed_frac(in);
+}
+
+static int tegra_dc_set_timings(struct tegra_dc *dc,
+				struct drm_display_mode *mode)
+{
+	/* TODO: For HDMI compliance, h & v ref_to_sync should be set to 1 */
+	unsigned int h_ref_to_sync = 0;
+	unsigned int v_ref_to_sync = 0;
+	unsigned long value;
+
+	tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS);
+
+	value = (v_ref_to_sync << 16) | h_ref_to_sync;
+	tegra_dc_writel(dc, value, DC_DISP_REF_TO_SYNC);
+
+	value = ((mode->vsync_end - mode->vsync_start) << 16) |
+		((mode->hsync_end - mode->hsync_start) <<  0);
+	tegra_dc_writel(dc, value, DC_DISP_SYNC_WIDTH);
+
+	value = ((mode->vsync_start - mode->vdisplay) << 16) |
+		((mode->hsync_start - mode->hdisplay) <<  0);
+	tegra_dc_writel(dc, value, DC_DISP_BACK_PORCH);
+
+	value = ((mode->vtotal - mode->vsync_end) << 16) |
+		((mode->htotal - mode->hsync_end) <<  0);
+	tegra_dc_writel(dc, value, DC_DISP_FRONT_PORCH);
+
+	value = (mode->vdisplay << 16) | mode->hdisplay;
+	tegra_dc_writel(dc, value, DC_DISP_ACTIVE);
+
+	return 0;
+}
+
+static int tegra_crtc_setup_clk(struct drm_crtc *crtc,
+				struct drm_display_mode *mode,
+				unsigned long *div)
+{
+	unsigned long pclk = mode->clock * 1000, rate;
+	struct tegra_dc *dc = to_tegra_dc(crtc);
+	struct tegra_output *output = NULL;
+	struct drm_encoder *encoder;
+	long err;
+
+	list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head)
+		if (encoder->crtc == crtc) {
+			output = encoder_to_output(encoder);
+			break;
+		}
+
+	if (!output)
+		return -ENODEV;
+
+	/*
+	 * This assumes that the display controller will divide its parent
+	 * clock by 2 to generate the pixel clock.
+	 */
+	err = tegra_output_setup_clock(output, dc->clk, pclk * 2);
+	if (err < 0) {
+		dev_err(dc->dev, "failed to setup clock: %ld\n", err);
+		return err;
+	}
+
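+	/* pclk = rate * 2 / (div + 2), so solve for the divider value */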
+	rate = clk_get_rate(dc->clk);
+	*div = (rate * 2 / pclk) - 2;
+
+	DRM_DEBUG_KMS("rate: %lu, div: %lu\n", rate, *div);
+
+	return 0;
+}
+
+static int tegra_crtc_mode_set(struct drm_crtc *crtc,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted,
+			       int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct tegra_framebuffer *fb = to_tegra_fb(crtc->fb);
+	struct tegra_dc *dc = to_tegra_dc(crtc);
+	unsigned int h_dda, v_dda, bpp;
+	struct tegra_dc_window win;
+	unsigned long div, value;
+	int err;
+
+	err = tegra_crtc_setup_clk(crtc, mode, &div);
+	if (err) {
+		dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err);
+		return err;
+	}
+
+	/* program display mode */
+	tegra_dc_set_timings(dc, mode);
+
+	value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
+	tegra_dc_writel(dc, value, DC_DISP_DATA_ENABLE_OPTIONS);
+
+	value = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_POLARITY(1));
+	value &= ~LVS_OUTPUT_POLARITY_LOW;
+	value &= ~LHS_OUTPUT_POLARITY_LOW;
+	tegra_dc_writel(dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));
+
+	value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB |
+		DISP_ORDER_RED_BLUE;
+	tegra_dc_writel(dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
+
+	tegra_dc_writel(dc, 0x00010001, DC_DISP_SHIFT_CLOCK_OPTIONS);
+
+	value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1;
+	tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
+
+	/* setup window parameters */
+	memset(&win, 0, sizeof(win));
+	win.x.full = dfixed_const(0);
+	win.y.full = dfixed_const(0);
+	win.w.full = dfixed_const(mode->hdisplay);
+	win.h.full = dfixed_const(mode->vdisplay);
+	win.outx = 0;
+	win.outy = 0;
+	win.outw = mode->hdisplay;
+	win.outh = mode->vdisplay;
+
+	switch (crtc->fb->pixel_format) {
+	case DRM_FORMAT_XRGB8888:
+		win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
+		break;
+
+	case DRM_FORMAT_RGB565:
+		win.fmt = WIN_COLOR_DEPTH_B5G6R5;
+		break;
+
+	default:
+		win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
+		WARN_ON(1);
+		break;
+	}
+
+	bpp = crtc->fb->bits_per_pixel / 8;
+	win.stride = crtc->fb->pitches[0];
+
+	/* program window registers */
+	value = tegra_dc_readl(dc, DC_CMD_DISPLAY_WINDOW_HEADER);
+	value |= WINDOW_A_SELECT;
+	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+	tegra_dc_writel(dc, win.fmt, DC_WIN_COLOR_DEPTH);
+	tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
+
+	value = V_POSITION(win.outy) | H_POSITION(win.outx);
+	tegra_dc_writel(dc, value, DC_WIN_POSITION);
+
+	value = V_SIZE(win.outh) | H_SIZE(win.outw);
+	tegra_dc_writel(dc, value, DC_WIN_SIZE);
+
+	value = V_PRESCALED_SIZE(dfixed_trunc(win.h)) |
+		H_PRESCALED_SIZE(dfixed_trunc(win.w) * bpp);
+	tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
+
+	h_dda = compute_dda_inc(win.w, win.outw, false, bpp);
+	v_dda = compute_dda_inc(win.h, win.outh, true, bpp);
+
+	value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
+	tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
+
+	h_dda = compute_initial_dda(win.x);
+	v_dda = compute_initial_dda(win.y);
+
+	tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
+	tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
+
+	tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
+	tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
+
+	tegra_dc_writel(dc, fb->obj->paddr, DC_WINBUF_START_ADDR);
+	tegra_dc_writel(dc, win.stride, DC_WIN_LINE_STRIDE);
+	tegra_dc_writel(dc, dfixed_trunc(win.x) * bpp,
+			DC_WINBUF_ADDR_H_OFFSET);
+	tegra_dc_writel(dc, dfixed_trunc(win.y), DC_WINBUF_ADDR_V_OFFSET);
+
+	value = WIN_ENABLE;
+
+	if (bpp < 24)
+		value |= COLOR_EXPAND;
+
+	tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+	tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_NOKEY);
+	tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_1WIN);
+
+	return 0;
+}
+
+static void tegra_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct tegra_dc *dc = to_tegra_dc(crtc);
+	unsigned int syncpt;
+	unsigned long value;
+
+	/* hardware initialization */
+	tegra_periph_reset_deassert(dc->clk);
+	usleep_range(10000, 20000);
+
+	if (dc->pipe)
+		syncpt = SYNCPT_VBLANK1;
+	else
+		syncpt = SYNCPT_VBLANK0;
+
+	/* initialize display controller */
+	tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+	tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
+
+	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
+
+	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+		WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+
+	value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+		PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+	value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+	value |= DISP_CTRL_MODE_C_DISPLAY;
+	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+	/* initialize memory fetch high-priority thresholds and timer */
+	value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
+		WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
+	tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
+
+	value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
+		WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
+	tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+	value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+	value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+}
+
+static void tegra_crtc_commit(struct drm_crtc *crtc)
+{
+	struct tegra_dc *dc = to_tegra_dc(crtc);
+	unsigned long update_mask;
+	unsigned long value;
+
+	update_mask = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
+
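+	/*
+	 * The UPDATE bits are the ACT_REQ bits shifted left by 8 (see
+	 * DC_CMD_STATE_CONTROL): writing them first latches the newly
+	 * programmed state, the ACT_REQ write below then requests its
+	 * activation.
+	 */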
+	tegra_dc_writel(dc, update_mask << 8, DC_CMD_STATE_CONTROL);
+
+	value = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
+	value |= FRAME_END_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+	value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+	value |= FRAME_END_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+	tegra_dc_writel(dc, update_mask, DC_CMD_STATE_CONTROL);
+}
+
+static void tegra_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
+	.dpms = tegra_crtc_dpms,
+	.mode_fixup = tegra_crtc_mode_fixup,
+	.mode_set = tegra_crtc_mode_set,
+	.prepare = tegra_crtc_prepare,
+	.commit = tegra_crtc_commit,
+	.load_lut = tegra_crtc_load_lut,
+};
+
+static irqreturn_t tegra_drm_irq(int irq, void *data)
+{
+	struct tegra_dc *dc = data;
+	unsigned long status;
+
+	status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
+	tegra_dc_writel(dc, status, DC_CMD_INT_STATUS);
+
+	if (status & FRAME_END_INT) {
+		/*
+		dev_dbg(dc->dev, "%s(): frame end\n", __func__);
+		*/
+	}
+
+	if (status & VBLANK_INT) {
+		/*
+		dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
+		*/
+		drm_handle_vblank(dc->base.dev, dc->pipe);
+	}
+
+	if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
+		/*
+		dev_dbg(dc->dev, "%s(): underflow\n", __func__);
+		*/
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int tegra_dc_show_regs(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct tegra_dc *dc = node->info_ent->data;
+
+#define DUMP_REG(name)						\
+	seq_printf(s, "%-40s %#05x %08lx\n", #name, name,	\
+		   tegra_dc_readl(dc, name))
+
+	DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT);
+	DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+	DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_ERROR);
+	DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT);
+	DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_CNTRL);
+	DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_ERROR);
+	DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT);
+	DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_CNTRL);
+	DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_ERROR);
+	DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT);
+	DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_CNTRL);
+	DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_ERROR);
+	DUMP_REG(DC_CMD_CONT_SYNCPT_VSYNC);
+	DUMP_REG(DC_CMD_DISPLAY_COMMAND_OPTION0);
+	DUMP_REG(DC_CMD_DISPLAY_COMMAND);
+	DUMP_REG(DC_CMD_SIGNAL_RAISE);
+	DUMP_REG(DC_CMD_DISPLAY_POWER_CONTROL);
+	DUMP_REG(DC_CMD_INT_STATUS);
+	DUMP_REG(DC_CMD_INT_MASK);
+	DUMP_REG(DC_CMD_INT_ENABLE);
+	DUMP_REG(DC_CMD_INT_TYPE);
+	DUMP_REG(DC_CMD_INT_POLARITY);
+	DUMP_REG(DC_CMD_SIGNAL_RAISE1);
+	DUMP_REG(DC_CMD_SIGNAL_RAISE2);
+	DUMP_REG(DC_CMD_SIGNAL_RAISE3);
+	DUMP_REG(DC_CMD_STATE_ACCESS);
+	DUMP_REG(DC_CMD_STATE_CONTROL);
+	DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
+	DUMP_REG(DC_CMD_REG_ACT_CONTROL);
+	DUMP_REG(DC_COM_CRC_CONTROL);
+	DUMP_REG(DC_COM_CRC_CHECKSUM);
+	DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(0));
+	DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(1));
+	DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(2));
+	DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(3));
+	DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(0));
+	DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(1));
+	DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(2));
+	DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(3));
+	DUMP_REG(DC_COM_PIN_OUTPUT_DATA(0));
+	DUMP_REG(DC_COM_PIN_OUTPUT_DATA(1));
+	DUMP_REG(DC_COM_PIN_OUTPUT_DATA(2));
+	DUMP_REG(DC_COM_PIN_OUTPUT_DATA(3));
+	DUMP_REG(DC_COM_PIN_INPUT_ENABLE(0));
+	DUMP_REG(DC_COM_PIN_INPUT_ENABLE(1));
+	DUMP_REG(DC_COM_PIN_INPUT_ENABLE(2));
+	DUMP_REG(DC_COM_PIN_INPUT_ENABLE(3));
+	DUMP_REG(DC_COM_PIN_INPUT_DATA(0));
+	DUMP_REG(DC_COM_PIN_INPUT_DATA(1));
+	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(0));
+	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(1));
+	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(2));
+	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(3));
+	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(4));
+	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(5));
+	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(6));
+	DUMP_REG(DC_COM_PIN_MISC_CONTROL);
+	DUMP_REG(DC_COM_PIN_PM0_CONTROL);
+	DUMP_REG(DC_COM_PIN_PM0_DUTY_CYCLE);
+	DUMP_REG(DC_COM_PIN_PM1_CONTROL);
+	DUMP_REG(DC_COM_PIN_PM1_DUTY_CYCLE);
+	DUMP_REG(DC_COM_SPI_CONTROL);
+	DUMP_REG(DC_COM_SPI_START_BYTE);
+	DUMP_REG(DC_COM_HSPI_WRITE_DATA_AB);
+	DUMP_REG(DC_COM_HSPI_WRITE_DATA_CD);
+	DUMP_REG(DC_COM_HSPI_CS_DC);
+	DUMP_REG(DC_COM_SCRATCH_REGISTER_A);
+	DUMP_REG(DC_COM_SCRATCH_REGISTER_B);
+	DUMP_REG(DC_COM_GPIO_CTRL);
+	DUMP_REG(DC_COM_GPIO_DEBOUNCE_COUNTER);
+	DUMP_REG(DC_COM_CRC_CHECKSUM_LATCHED);
+	DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0);
+	DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS1);
+	DUMP_REG(DC_DISP_DISP_WIN_OPTIONS);
+	DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY);
+	DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+	DUMP_REG(DC_DISP_DISP_TIMING_OPTIONS);
+	DUMP_REG(DC_DISP_REF_TO_SYNC);
+	DUMP_REG(DC_DISP_SYNC_WIDTH);
+	DUMP_REG(DC_DISP_BACK_PORCH);
+	DUMP_REG(DC_DISP_ACTIVE);
+	DUMP_REG(DC_DISP_FRONT_PORCH);
+	DUMP_REG(DC_DISP_H_PULSE0_CONTROL);
+	DUMP_REG(DC_DISP_H_PULSE0_POSITION_A);
+	DUMP_REG(DC_DISP_H_PULSE0_POSITION_B);
+	DUMP_REG(DC_DISP_H_PULSE0_POSITION_C);
+	DUMP_REG(DC_DISP_H_PULSE0_POSITION_D);
+	DUMP_REG(DC_DISP_H_PULSE1_CONTROL);
+	DUMP_REG(DC_DISP_H_PULSE1_POSITION_A);
+	DUMP_REG(DC_DISP_H_PULSE1_POSITION_B);
+	DUMP_REG(DC_DISP_H_PULSE1_POSITION_C);
+	DUMP_REG(DC_DISP_H_PULSE1_POSITION_D);
+	DUMP_REG(DC_DISP_H_PULSE2_CONTROL);
+	DUMP_REG(DC_DISP_H_PULSE2_POSITION_A);
+	DUMP_REG(DC_DISP_H_PULSE2_POSITION_B);
+	DUMP_REG(DC_DISP_H_PULSE2_POSITION_C);
+	DUMP_REG(DC_DISP_H_PULSE2_POSITION_D);
+	DUMP_REG(DC_DISP_V_PULSE0_CONTROL);
+	DUMP_REG(DC_DISP_V_PULSE0_POSITION_A);
+	DUMP_REG(DC_DISP_V_PULSE0_POSITION_B);
+	DUMP_REG(DC_DISP_V_PULSE0_POSITION_C);
+	DUMP_REG(DC_DISP_V_PULSE1_CONTROL);
+	DUMP_REG(DC_DISP_V_PULSE1_POSITION_A);
+	DUMP_REG(DC_DISP_V_PULSE1_POSITION_B);
+	DUMP_REG(DC_DISP_V_PULSE1_POSITION_C);
+	DUMP_REG(DC_DISP_V_PULSE2_CONTROL);
+	DUMP_REG(DC_DISP_V_PULSE2_POSITION_A);
+	DUMP_REG(DC_DISP_V_PULSE3_CONTROL);
+	DUMP_REG(DC_DISP_V_PULSE3_POSITION_A);
+	DUMP_REG(DC_DISP_M0_CONTROL);
+	DUMP_REG(DC_DISP_M1_CONTROL);
+	DUMP_REG(DC_DISP_DI_CONTROL);
+	DUMP_REG(DC_DISP_PP_CONTROL);
+	DUMP_REG(DC_DISP_PP_SELECT_A);
+	DUMP_REG(DC_DISP_PP_SELECT_B);
+	DUMP_REG(DC_DISP_PP_SELECT_C);
+	DUMP_REG(DC_DISP_PP_SELECT_D);
+	DUMP_REG(DC_DISP_DISP_CLOCK_CONTROL);
+	DUMP_REG(DC_DISP_DISP_INTERFACE_CONTROL);
+	DUMP_REG(DC_DISP_DISP_COLOR_CONTROL);
+	DUMP_REG(DC_DISP_SHIFT_CLOCK_OPTIONS);
+	DUMP_REG(DC_DISP_DATA_ENABLE_OPTIONS);
+	DUMP_REG(DC_DISP_SERIAL_INTERFACE_OPTIONS);
+	DUMP_REG(DC_DISP_LCD_SPI_OPTIONS);
+	DUMP_REG(DC_DISP_BORDER_COLOR);
+	DUMP_REG(DC_DISP_COLOR_KEY0_LOWER);
+	DUMP_REG(DC_DISP_COLOR_KEY0_UPPER);
+	DUMP_REG(DC_DISP_COLOR_KEY1_LOWER);
+	DUMP_REG(DC_DISP_COLOR_KEY1_UPPER);
+	DUMP_REG(DC_DISP_CURSOR_FOREGROUND);
+	DUMP_REG(DC_DISP_CURSOR_BACKGROUND);
+	DUMP_REG(DC_DISP_CURSOR_START_ADDR);
+	DUMP_REG(DC_DISP_CURSOR_START_ADDR_NS);
+	DUMP_REG(DC_DISP_CURSOR_POSITION);
+	DUMP_REG(DC_DISP_CURSOR_POSITION_NS);
+	DUMP_REG(DC_DISP_INIT_SEQ_CONTROL);
+	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_A);
+	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_B);
+	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_C);
+	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_D);
+	DUMP_REG(DC_DISP_DC_MCCIF_FIFOCTRL);
+	DUMP_REG(DC_DISP_MCCIF_DISPLAY0A_HYST);
+	DUMP_REG(DC_DISP_MCCIF_DISPLAY0B_HYST);
+	DUMP_REG(DC_DISP_MCCIF_DISPLAY1A_HYST);
+	DUMP_REG(DC_DISP_MCCIF_DISPLAY1B_HYST);
+	DUMP_REG(DC_DISP_DAC_CRT_CTRL);
+	DUMP_REG(DC_DISP_DISP_MISC_CONTROL);
+	DUMP_REG(DC_DISP_SD_CONTROL);
+	DUMP_REG(DC_DISP_SD_CSC_COEFF);
+	DUMP_REG(DC_DISP_SD_LUT(0));
+	DUMP_REG(DC_DISP_SD_LUT(1));
+	DUMP_REG(DC_DISP_SD_LUT(2));
+	DUMP_REG(DC_DISP_SD_LUT(3));
+	DUMP_REG(DC_DISP_SD_LUT(4));
+	DUMP_REG(DC_DISP_SD_LUT(5));
+	DUMP_REG(DC_DISP_SD_LUT(6));
+	DUMP_REG(DC_DISP_SD_LUT(7));
+	DUMP_REG(DC_DISP_SD_LUT(8));
+	DUMP_REG(DC_DISP_SD_FLICKER_CONTROL);
+	DUMP_REG(DC_DISP_DC_PIXEL_COUNT);
+	DUMP_REG(DC_DISP_SD_HISTOGRAM(0));
+	DUMP_REG(DC_DISP_SD_HISTOGRAM(1));
+	DUMP_REG(DC_DISP_SD_HISTOGRAM(2));
+	DUMP_REG(DC_DISP_SD_HISTOGRAM(3));
+	DUMP_REG(DC_DISP_SD_HISTOGRAM(4));
+	DUMP_REG(DC_DISP_SD_HISTOGRAM(5));
+	DUMP_REG(DC_DISP_SD_HISTOGRAM(6));
+	DUMP_REG(DC_DISP_SD_HISTOGRAM(7));
+	DUMP_REG(DC_DISP_SD_BL_TF(0));
+	DUMP_REG(DC_DISP_SD_BL_TF(1));
+	DUMP_REG(DC_DISP_SD_BL_TF(2));
+	DUMP_REG(DC_DISP_SD_BL_TF(3));
+	DUMP_REG(DC_DISP_SD_BL_CONTROL);
+	DUMP_REG(DC_DISP_SD_HW_K_VALUES);
+	DUMP_REG(DC_DISP_SD_MAN_K_VALUES);
+	DUMP_REG(DC_WIN_WIN_OPTIONS);
+	DUMP_REG(DC_WIN_BYTE_SWAP);
+	DUMP_REG(DC_WIN_BUFFER_CONTROL);
+	DUMP_REG(DC_WIN_COLOR_DEPTH);
+	DUMP_REG(DC_WIN_POSITION);
+	DUMP_REG(DC_WIN_SIZE);
+	DUMP_REG(DC_WIN_PRESCALED_SIZE);
+	DUMP_REG(DC_WIN_H_INITIAL_DDA);
+	DUMP_REG(DC_WIN_V_INITIAL_DDA);
+	DUMP_REG(DC_WIN_DDA_INC);
+	DUMP_REG(DC_WIN_LINE_STRIDE);
+	DUMP_REG(DC_WIN_BUF_STRIDE);
+	DUMP_REG(DC_WIN_UV_BUF_STRIDE);
+	DUMP_REG(DC_WIN_BUFFER_ADDR_MODE);
+	DUMP_REG(DC_WIN_DV_CONTROL);
+	DUMP_REG(DC_WIN_BLEND_NOKEY);
+	DUMP_REG(DC_WIN_BLEND_1WIN);
+	DUMP_REG(DC_WIN_BLEND_2WIN_X);
+	DUMP_REG(DC_WIN_BLEND_2WIN_Y);
+	DUMP_REG(DC_WIN_BLEND32WIN_XY);
+	DUMP_REG(DC_WIN_HP_FETCH_CONTROL);
+	DUMP_REG(DC_WINBUF_START_ADDR);
+	DUMP_REG(DC_WINBUF_START_ADDR_NS);
+	DUMP_REG(DC_WINBUF_START_ADDR_U);
+	DUMP_REG(DC_WINBUF_START_ADDR_U_NS);
+	DUMP_REG(DC_WINBUF_START_ADDR_V);
+	DUMP_REG(DC_WINBUF_START_ADDR_V_NS);
+	DUMP_REG(DC_WINBUF_ADDR_H_OFFSET);
+	DUMP_REG(DC_WINBUF_ADDR_H_OFFSET_NS);
+	DUMP_REG(DC_WINBUF_ADDR_V_OFFSET);
+	DUMP_REG(DC_WINBUF_ADDR_V_OFFSET_NS);
+	DUMP_REG(DC_WINBUF_UFLOW_STATUS);
+	DUMP_REG(DC_WINBUF_AD_UFLOW_STATUS);
+	DUMP_REG(DC_WINBUF_BD_UFLOW_STATUS);
+	DUMP_REG(DC_WINBUF_CD_UFLOW_STATUS);
+
+#undef DUMP_REG
+
+	return 0;
+}
+
+static struct drm_info_list debugfs_files[] = {
+	{ "regs", tegra_dc_show_regs, 0, NULL },
+};
+
+static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
+{
+	unsigned int i;
+	char *name;
+	int err;
+
+	name = kasprintf(GFP_KERNEL, "dc.%d", dc->pipe);
+	if (!name)
+		return -ENOMEM;
+
+	dc->debugfs = debugfs_create_dir(name, minor->debugfs_root);
+	kfree(name);
+
+	if (!dc->debugfs)
+		return -ENOMEM;
+
+	dc->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+				    GFP_KERNEL);
+	if (!dc->debugfs_files) {
+		err = -ENOMEM;
+		goto remove;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+		dc->debugfs_files[i].data = dc;
+
+	err = drm_debugfs_create_files(dc->debugfs_files,
+				       ARRAY_SIZE(debugfs_files),
+				       dc->debugfs, minor);
+	if (err < 0)
+		goto free;
+
+	dc->minor = minor;
+
+	return 0;
+
+free:
+	kfree(dc->debugfs_files);
+	dc->debugfs_files = NULL;
+remove:
+	debugfs_remove(dc->debugfs);
+	dc->debugfs = NULL;
+
+	return err;
+}
+
+static int tegra_dc_debugfs_exit(struct tegra_dc *dc)
+{
+	drm_debugfs_remove_files(dc->debugfs_files, ARRAY_SIZE(debugfs_files),
+				 dc->minor);
+	dc->minor = NULL;
+
+	kfree(dc->debugfs_files);
+	dc->debugfs_files = NULL;
+
+	debugfs_remove(dc->debugfs);
+	dc->debugfs = NULL;
+
+	return 0;
+}
+
+static int tegra_dc_drm_init(struct host1x_client *client,
+			     struct drm_device *drm)
+{
+	struct tegra_dc *dc = host1x_client_to_dc(client);
+	int err;
+
+	dc->pipe = drm->mode_config.num_crtc;
+
+	drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
+	drm_mode_crtc_set_gamma_size(&dc->base, 256);
+	drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
+
+	err = tegra_dc_rgb_init(drm, dc);
+	if (err < 0 && err != -ENODEV) {
+		dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
+		return err;
+	}
+
+	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+		err = tegra_dc_debugfs_init(dc, drm->primary);
+		if (err < 0)
+			dev_err(dc->dev, "debugfs setup failed: %d\n", err);
+	}
+
+	err = devm_request_irq(dc->dev, dc->irq, tegra_drm_irq, 0,
+			       dev_name(dc->dev), dc);
+	if (err < 0) {
+		dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq,
+			err);
+		return err;
+	}
+
+	return 0;
+}
+
+static int tegra_dc_drm_exit(struct host1x_client *client)
+{
+	struct tegra_dc *dc = host1x_client_to_dc(client);
+	int err;
+
+	devm_free_irq(dc->dev, dc->irq, dc);
+
+	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+		err = tegra_dc_debugfs_exit(dc);
+		if (err < 0)
+			dev_err(dc->dev, "debugfs cleanup failed: %d\n", err);
+	}
+
+	err = tegra_dc_rgb_exit(dc);
+	if (err) {
+		dev_err(dc->dev, "failed to shutdown RGB output: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static const struct host1x_client_ops dc_client_ops = {
+	.drm_init = tegra_dc_drm_init,
+	.drm_exit = tegra_dc_drm_exit,
+};
+
+static int tegra_dc_probe(struct platform_device *pdev)
+{
+	struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+	struct resource *regs;
+	struct tegra_dc *dc;
+	int err;
+
+	dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
+	if (!dc)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&dc->list);
+	dc->dev = &pdev->dev;
+
+	dc->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(dc->clk)) {
+		dev_err(&pdev->dev, "failed to get clock\n");
+		return PTR_ERR(dc->clk);
+	}
+
+	err = clk_prepare_enable(dc->clk);
+	if (err < 0)
+		return err;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs) {
+		dev_err(&pdev->dev, "failed to get registers\n");
+		return -ENXIO;
+	}
+
+	dc->regs = devm_request_and_ioremap(&pdev->dev, regs);
+	if (!dc->regs) {
+		dev_err(&pdev->dev, "failed to remap registers\n");
+		return -ENXIO;
+	}
+
+	dc->irq = platform_get_irq(pdev, 0);
+	if (dc->irq < 0) {
+		dev_err(&pdev->dev, "failed to get IRQ\n");
+		return dc->irq;
+	}
+
+	INIT_LIST_HEAD(&dc->client.list);
+	dc->client.ops = &dc_client_ops;
+	dc->client.dev = &pdev->dev;
+
+	err = tegra_dc_rgb_probe(dc);
+	if (err < 0 && err != -ENODEV) {
+		dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err);
+		return err;
+	}
+
+	err = host1x_register_client(host1x, &dc->client);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+			err);
+		return err;
+	}
+
+	platform_set_drvdata(pdev, dc);
+
+	return 0;
+}
+
+static int tegra_dc_remove(struct platform_device *pdev)
+{
+	struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+	struct tegra_dc *dc = platform_get_drvdata(pdev);
+	int err;
+
+	err = host1x_unregister_client(host1x, &dc->client);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+			err);
+		return err;
+	}
+
+	clk_disable_unprepare(dc->clk);
+
+	return 0;
+}
+
+static struct of_device_id tegra_dc_of_match[] = {
+	{ .compatible = "nvidia,tegra30-dc", },
+	{ .compatible = "nvidia,tegra20-dc", },
+	{ },
+};
+
+struct platform_driver tegra_dc_driver = {
+	.driver = {
+		.name = "tegra-dc",
+		.owner = THIS_MODULE,
+		.of_match_table = tegra_dc_of_match,
+	},
+	.probe = tegra_dc_probe,
+	.remove = tegra_dc_remove,
+};
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
new file mode 100644
index 000000000000..99977b5d5c36
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -0,0 +1,388 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_DC_H
+#define TEGRA_DC_H 1
+
+#define DC_CMD_GENERAL_INCR_SYNCPT		0x000
+#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL	0x001
+#define DC_CMD_GENERAL_INCR_SYNCPT_ERROR	0x002
+#define DC_CMD_WIN_A_INCR_SYNCPT		0x008
+#define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL		0x009
+#define DC_CMD_WIN_A_INCR_SYNCPT_ERROR		0x00a
+#define DC_CMD_WIN_B_INCR_SYNCPT		0x010
+#define DC_CMD_WIN_B_INCR_SYNCPT_CNTRL		0x011
+#define DC_CMD_WIN_B_INCR_SYNCPT_ERROR		0x012
+#define DC_CMD_WIN_C_INCR_SYNCPT		0x018
+#define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL		0x019
+#define DC_CMD_WIN_C_INCR_SYNCPT_ERROR		0x01a
+#define DC_CMD_CONT_SYNCPT_VSYNC		0x028
+#define DC_CMD_DISPLAY_COMMAND_OPTION0		0x031
+#define DC_CMD_DISPLAY_COMMAND			0x032
+#define DISP_CTRL_MODE_STOP (0 << 5)
+#define DISP_CTRL_MODE_C_DISPLAY (1 << 5)
+#define DISP_CTRL_MODE_NC_DISPLAY (2 << 5)
+#define DC_CMD_SIGNAL_RAISE			0x033
+#define DC_CMD_DISPLAY_POWER_CONTROL		0x036
+#define PW0_ENABLE (1 <<  0)
+#define PW1_ENABLE (1 <<  2)
+#define PW2_ENABLE (1 <<  4)
+#define PW3_ENABLE (1 <<  6)
+#define PW4_ENABLE (1 <<  8)
+#define PM0_ENABLE (1 << 16)
+#define PM1_ENABLE (1 << 18)
+
+#define DC_CMD_INT_STATUS			0x037
+#define DC_CMD_INT_MASK				0x038
+#define DC_CMD_INT_ENABLE			0x039
+#define DC_CMD_INT_TYPE				0x03a
+#define DC_CMD_INT_POLARITY			0x03b
+#define CTXSW_INT     (1 << 0)
+#define FRAME_END_INT (1 << 1)
+#define VBLANK_INT    (1 << 2)
+#define WIN_A_UF_INT  (1 << 8)
+#define WIN_B_UF_INT  (1 << 9)
+#define WIN_C_UF_INT  (1 << 10)
+#define WIN_A_OF_INT  (1 << 14)
+#define WIN_B_OF_INT  (1 << 15)
+#define WIN_C_OF_INT  (1 << 16)
+
+#define DC_CMD_SIGNAL_RAISE1			0x03c
+#define DC_CMD_SIGNAL_RAISE2			0x03d
+#define DC_CMD_SIGNAL_RAISE3			0x03e
+
+#define DC_CMD_STATE_ACCESS			0x040
+
+#define DC_CMD_STATE_CONTROL			0x041
+#define GENERAL_ACT_REQ (1 <<  0)
+#define WIN_A_ACT_REQ   (1 <<  1)
+#define WIN_B_ACT_REQ   (1 <<  2)
+#define WIN_C_ACT_REQ   (1 <<  3)
+#define GENERAL_UPDATE  (1 <<  8)
+#define WIN_A_UPDATE    (1 <<  9)
+#define WIN_B_UPDATE    (1 << 10)
+#define WIN_C_UPDATE    (1 << 11)
+#define NC_HOST_TRIG    (1 << 24)
+
+#define DC_CMD_DISPLAY_WINDOW_HEADER		0x042
+#define WINDOW_A_SELECT (1 << 4)
+#define WINDOW_B_SELECT (1 << 5)
+#define WINDOW_C_SELECT (1 << 6)
+
+#define DC_CMD_REG_ACT_CONTROL			0x043
+
+#define DC_COM_CRC_CONTROL			0x300
+#define DC_COM_CRC_CHECKSUM			0x301
+#define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x))
+#define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x))
+#define LVS_OUTPUT_POLARITY_LOW (1 << 28)
+#define LHS_OUTPUT_POLARITY_LOW (1 << 30)
+#define DC_COM_PIN_OUTPUT_DATA(x) (0x30a + (x))
+#define DC_COM_PIN_INPUT_ENABLE(x) (0x30e + (x))
+#define DC_COM_PIN_INPUT_DATA(x) (0x312 + (x))
+#define DC_COM_PIN_OUTPUT_SELECT(x) (0x314 + (x))
+
+#define DC_COM_PIN_MISC_CONTROL			0x31b
+#define DC_COM_PIN_PM0_CONTROL			0x31c
+#define DC_COM_PIN_PM0_DUTY_CYCLE		0x31d
+#define DC_COM_PIN_PM1_CONTROL			0x31e
+#define DC_COM_PIN_PM1_DUTY_CYCLE		0x31f
+
+#define DC_COM_SPI_CONTROL			0x320
+#define DC_COM_SPI_START_BYTE			0x321
+#define DC_COM_HSPI_WRITE_DATA_AB		0x322
+#define DC_COM_HSPI_WRITE_DATA_CD		0x323
+#define DC_COM_HSPI_CS_DC			0x324
+#define DC_COM_SCRATCH_REGISTER_A		0x325
+#define DC_COM_SCRATCH_REGISTER_B		0x326
+#define DC_COM_GPIO_CTRL			0x327
+#define DC_COM_GPIO_DEBOUNCE_COUNTER		0x328
+#define DC_COM_CRC_CHECKSUM_LATCHED		0x329
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS0		0x400
+#define H_PULSE_0_ENABLE (1 <<  8)
+#define H_PULSE_1_ENABLE (1 << 10)
+#define H_PULSE_2_ENABLE (1 << 12)
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS1		0x401
+
+#define DC_DISP_DISP_WIN_OPTIONS		0x402
+#define HDMI_ENABLE (1 << 30)
+
+#define DC_DISP_DISP_MEM_HIGH_PRIORITY		0x403
+#define CURSOR_THRESHOLD(x)   (((x) & 0x03) << 24)
+#define WINDOW_A_THRESHOLD(x) (((x) & 0x7f) << 16)
+#define WINDOW_B_THRESHOLD(x) (((x) & 0x7f) <<  8)
+#define WINDOW_C_THRESHOLD(x) (((x) & 0xff) <<  0)
+
+#define DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER	0x404
+#define CURSOR_DELAY(x)   (((x) & 0x3f) << 24)
+#define WINDOW_A_DELAY(x) (((x) & 0x3f) << 16)
+#define WINDOW_B_DELAY(x) (((x) & 0x3f) <<  8)
+#define WINDOW_C_DELAY(x) (((x) & 0x3f) <<  0)
+
+#define DC_DISP_DISP_TIMING_OPTIONS		0x405
+#define VSYNC_H_POSITION(x) ((x) & 0xfff)
+
+#define DC_DISP_REF_TO_SYNC			0x406
+#define DC_DISP_SYNC_WIDTH			0x407
+#define DC_DISP_BACK_PORCH			0x408
+#define DC_DISP_ACTIVE				0x409
+#define DC_DISP_FRONT_PORCH			0x40a
+#define DC_DISP_H_PULSE0_CONTROL		0x40b
+#define DC_DISP_H_PULSE0_POSITION_A		0x40c
+#define DC_DISP_H_PULSE0_POSITION_B		0x40d
+#define DC_DISP_H_PULSE0_POSITION_C		0x40e
+#define DC_DISP_H_PULSE0_POSITION_D		0x40f
+#define DC_DISP_H_PULSE1_CONTROL		0x410
+#define DC_DISP_H_PULSE1_POSITION_A		0x411
+#define DC_DISP_H_PULSE1_POSITION_B		0x412
+#define DC_DISP_H_PULSE1_POSITION_C		0x413
+#define DC_DISP_H_PULSE1_POSITION_D		0x414
+#define DC_DISP_H_PULSE2_CONTROL		0x415
+#define DC_DISP_H_PULSE2_POSITION_A		0x416
+#define DC_DISP_H_PULSE2_POSITION_B		0x417
+#define DC_DISP_H_PULSE2_POSITION_C		0x418
+#define DC_DISP_H_PULSE2_POSITION_D		0x419
+#define DC_DISP_V_PULSE0_CONTROL		0x41a
+#define DC_DISP_V_PULSE0_POSITION_A		0x41b
+#define DC_DISP_V_PULSE0_POSITION_B		0x41c
+#define DC_DISP_V_PULSE0_POSITION_C		0x41d
+#define DC_DISP_V_PULSE1_CONTROL		0x41e
+#define DC_DISP_V_PULSE1_POSITION_A		0x41f
+#define DC_DISP_V_PULSE1_POSITION_B		0x420
+#define DC_DISP_V_PULSE1_POSITION_C		0x421
+#define DC_DISP_V_PULSE2_CONTROL		0x422
+#define DC_DISP_V_PULSE2_POSITION_A		0x423
+#define DC_DISP_V_PULSE3_CONTROL		0x424
+#define DC_DISP_V_PULSE3_POSITION_A		0x425
+#define DC_DISP_M0_CONTROL			0x426
+#define DC_DISP_M1_CONTROL			0x427
+#define DC_DISP_DI_CONTROL			0x428
+#define DC_DISP_PP_CONTROL			0x429
+#define DC_DISP_PP_SELECT_A			0x42a
+#define DC_DISP_PP_SELECT_B			0x42b
+#define DC_DISP_PP_SELECT_C			0x42c
+#define DC_DISP_PP_SELECT_D			0x42d
+
+#define PULSE_MODE_NORMAL    (0 << 3)
+#define PULSE_MODE_ONE_CLOCK (1 << 3)
+#define PULSE_POLARITY_HIGH  (0 << 4)
+#define PULSE_POLARITY_LOW   (1 << 4)
+#define PULSE_QUAL_ALWAYS    (0 << 6)
+#define PULSE_QUAL_VACTIVE   (2 << 6)
+#define PULSE_QUAL_VACTIVE1  (3 << 6)
+#define PULSE_LAST_START_A   (0 << 8)
+#define PULSE_LAST_END_A     (1 << 8)
+#define PULSE_LAST_START_B   (2 << 8)
+#define PULSE_LAST_END_B     (3 << 8)
+#define PULSE_LAST_START_C   (4 << 8)
+#define PULSE_LAST_END_C     (5 << 8)
+#define PULSE_LAST_START_D   (6 << 8)
+#define PULSE_LAST_END_D     (7 << 8)
+
+#define PULSE_START(x) (((x) & 0xfff) <<  0)
+#define PULSE_END(x)   (((x) & 0xfff) << 16)
+
+#define DC_DISP_DISP_CLOCK_CONTROL		0x42e
+#define PIXEL_CLK_DIVIDER_PCD1  (0 << 8)
+#define PIXEL_CLK_DIVIDER_PCD1H (1 << 8)
+#define PIXEL_CLK_DIVIDER_PCD2  (2 << 8)
+#define PIXEL_CLK_DIVIDER_PCD3  (3 << 8)
+#define PIXEL_CLK_DIVIDER_PCD4  (4 << 8)
+#define PIXEL_CLK_DIVIDER_PCD6  (5 << 8)
+#define PIXEL_CLK_DIVIDER_PCD8  (6 << 8)
+#define PIXEL_CLK_DIVIDER_PCD9  (7 << 8)
+#define PIXEL_CLK_DIVIDER_PCD12 (8 << 8)
+#define PIXEL_CLK_DIVIDER_PCD16 (9 << 8)
+#define PIXEL_CLK_DIVIDER_PCD18 (10 << 8)
+#define PIXEL_CLK_DIVIDER_PCD24 (11 << 8)
+#define PIXEL_CLK_DIVIDER_PCD13 (12 << 8)
+#define SHIFT_CLK_DIVIDER(x)    ((x) & 0xff)
+
+#define DC_DISP_DISP_INTERFACE_CONTROL		0x42f
+#define DISP_DATA_FORMAT_DF1P1C    (0 << 0)
+#define DISP_DATA_FORMAT_DF1P2C24B (1 << 0)
+#define DISP_DATA_FORMAT_DF1P2C18B (2 << 0)
+#define DISP_DATA_FORMAT_DF1P2C16B (3 << 0)
+#define DISP_DATA_FORMAT_DF2S      (4 << 0)
+#define DISP_DATA_FORMAT_DF3S      (5 << 0)
+#define DISP_DATA_FORMAT_DFSPI     (6 << 0)
+#define DISP_DATA_FORMAT_DF1P3C24B (7 << 0)
+#define DISP_DATA_FORMAT_DF1P3C18B (8 << 0)
+#define DISP_ALIGNMENT_MSB         (0 << 8)
+#define DISP_ALIGNMENT_LSB         (1 << 8)
+#define DISP_ORDER_RED_BLUE        (0 << 9)
+#define DISP_ORDER_BLUE_RED        (1 << 9)
+
+#define DC_DISP_DISP_COLOR_CONTROL		0x430
+#define BASE_COLOR_SIZE666     (0 << 0)
+#define BASE_COLOR_SIZE111     (1 << 0)
+#define BASE_COLOR_SIZE222     (2 << 0)
+#define BASE_COLOR_SIZE333     (3 << 0)
+#define BASE_COLOR_SIZE444     (4 << 0)
+#define BASE_COLOR_SIZE555     (5 << 0)
+#define BASE_COLOR_SIZE565     (6 << 0)
+#define BASE_COLOR_SIZE332     (7 << 0)
+#define BASE_COLOR_SIZE888     (8 << 0)
+#define DITHER_CONTROL_DISABLE (0 << 8)
+#define DITHER_CONTROL_ORDERED (2 << 8)
+#define DITHER_CONTROL_ERRDIFF (3 << 8)
+
+#define DC_DISP_SHIFT_CLOCK_OPTIONS		0x431
+
+#define DC_DISP_DATA_ENABLE_OPTIONS		0x432
+#define DE_SELECT_ACTIVE_BLANK  (0 << 0)
+#define DE_SELECT_ACTIVE        (1 << 0)
+#define DE_SELECT_ACTIVE_IS     (2 << 0)
+#define DE_CONTROL_ONECLK       (0 << 2)
+#define DE_CONTROL_NORMAL       (1 << 2)
+#define DE_CONTROL_EARLY_EXT    (2 << 2)
+#define DE_CONTROL_EARLY        (3 << 2)
+#define DE_CONTROL_ACTIVE_BLANK (4 << 2)
+
+#define DC_DISP_SERIAL_INTERFACE_OPTIONS	0x433
+#define DC_DISP_LCD_SPI_OPTIONS			0x434
+#define DC_DISP_BORDER_COLOR			0x435
+#define DC_DISP_COLOR_KEY0_LOWER		0x436
+#define DC_DISP_COLOR_KEY0_UPPER		0x437
+#define DC_DISP_COLOR_KEY1_LOWER		0x438
+#define DC_DISP_COLOR_KEY1_UPPER		0x439
+
+#define DC_DISP_CURSOR_FOREGROUND		0x43c
+#define DC_DISP_CURSOR_BACKGROUND		0x43d
+
+#define DC_DISP_CURSOR_START_ADDR		0x43e
+#define DC_DISP_CURSOR_START_ADDR_NS		0x43f
+
+#define DC_DISP_CURSOR_POSITION			0x440
+#define DC_DISP_CURSOR_POSITION_NS		0x441
+
+#define DC_DISP_INIT_SEQ_CONTROL		0x442
+#define DC_DISP_SPI_INIT_SEQ_DATA_A		0x443
+#define DC_DISP_SPI_INIT_SEQ_DATA_B		0x444
+#define DC_DISP_SPI_INIT_SEQ_DATA_C		0x445
+#define DC_DISP_SPI_INIT_SEQ_DATA_D		0x446
+
+#define DC_DISP_DC_MCCIF_FIFOCTRL		0x480
+#define DC_DISP_MCCIF_DISPLAY0A_HYST		0x481
+#define DC_DISP_MCCIF_DISPLAY0B_HYST		0x482
+#define DC_DISP_MCCIF_DISPLAY1A_HYST		0x483
+#define DC_DISP_MCCIF_DISPLAY1B_HYST		0x484
+
+#define DC_DISP_DAC_CRT_CTRL			0x4c0
+#define DC_DISP_DISP_MISC_CONTROL		0x4c1
+#define DC_DISP_SD_CONTROL			0x4c2
+#define DC_DISP_SD_CSC_COEFF			0x4c3
+#define DC_DISP_SD_LUT(x)			(0x4c4 + (x))
+#define DC_DISP_SD_FLICKER_CONTROL		0x4cd
+#define DC_DISP_DC_PIXEL_COUNT			0x4ce
+#define DC_DISP_SD_HISTOGRAM(x)			(0x4cf + (x))
+#define DC_DISP_SD_BL_PARAMETERS		0x4d7
+#define DC_DISP_SD_BL_TF(x)			(0x4d8 + (x))
+#define DC_DISP_SD_BL_CONTROL			0x4dc
+#define DC_DISP_SD_HW_K_VALUES			0x4dd
+#define DC_DISP_SD_MAN_K_VALUES			0x4de
+
+#define DC_WIN_WIN_OPTIONS			0x700
+#define COLOR_EXPAND (1 <<  6)
+#define WIN_ENABLE   (1 << 30)
+
+#define DC_WIN_BYTE_SWAP			0x701
+#define BYTE_SWAP_NOSWAP  (0 << 0)
+#define BYTE_SWAP_SWAP2   (1 << 0)
+#define BYTE_SWAP_SWAP4   (2 << 0)
+#define BYTE_SWAP_SWAP4HW (3 << 0)
+
+#define DC_WIN_BUFFER_CONTROL			0x702
+#define BUFFER_CONTROL_HOST  (0 << 0)
+#define BUFFER_CONTROL_VI    (1 << 0)
+#define BUFFER_CONTROL_EPP   (2 << 0)
+#define BUFFER_CONTROL_MPEGE (3 << 0)
+#define BUFFER_CONTROL_SB2D  (4 << 0)
+
+#define DC_WIN_COLOR_DEPTH			0x703
+#define WIN_COLOR_DEPTH_P1              0
+#define WIN_COLOR_DEPTH_P2              1
+#define WIN_COLOR_DEPTH_P4              2
+#define WIN_COLOR_DEPTH_P8              3
+#define WIN_COLOR_DEPTH_B4G4R4A4        4
+#define WIN_COLOR_DEPTH_B5G5R5A         5
+#define WIN_COLOR_DEPTH_B5G6R5          6
+#define WIN_COLOR_DEPTH_AB5G5R5         7
+#define WIN_COLOR_DEPTH_B8G8R8A8       12
+#define WIN_COLOR_DEPTH_R8G8B8A8       13
+#define WIN_COLOR_DEPTH_B6x2G6x2R6x2A8 14
+#define WIN_COLOR_DEPTH_R6x2G6x2B6x2A8 15
+#define WIN_COLOR_DEPTH_YCbCr422       16
+#define WIN_COLOR_DEPTH_YUV422         17
+#define WIN_COLOR_DEPTH_YCbCr420P      18
+#define WIN_COLOR_DEPTH_YUV420P        19
+#define WIN_COLOR_DEPTH_YCbCr422P      20
+#define WIN_COLOR_DEPTH_YUV422P        21
+#define WIN_COLOR_DEPTH_YCbCr422R      22
+#define WIN_COLOR_DEPTH_YUV422R        23
+#define WIN_COLOR_DEPTH_YCbCr422RA     24
+#define WIN_COLOR_DEPTH_YUV422RA       25
+
+#define DC_WIN_POSITION				0x704
+#define H_POSITION(x) (((x) & 0x1fff) <<  0)
+#define V_POSITION(x) (((x) & 0x1fff) << 16)
+
+#define DC_WIN_SIZE				0x705
+#define H_SIZE(x) (((x) & 0x1fff) <<  0)
+#define V_SIZE(x) (((x) & 0x1fff) << 16)
+
+#define DC_WIN_PRESCALED_SIZE			0x706
+#define H_PRESCALED_SIZE(x) (((x) & 0x7fff) <<  0)
+#define V_PRESCALED_SIZE(x) (((x) & 0x1fff) << 16)
+
+#define DC_WIN_H_INITIAL_DDA			0x707
+#define DC_WIN_V_INITIAL_DDA			0x708
+#define DC_WIN_DDA_INC				0x709
+#define H_DDA_INC(x) (((x) & 0xffff) <<  0)
+#define V_DDA_INC(x) (((x) & 0xffff) << 16)
+
+#define DC_WIN_LINE_STRIDE			0x70a
+#define DC_WIN_BUF_STRIDE			0x70b
+#define DC_WIN_UV_BUF_STRIDE			0x70c
+#define DC_WIN_BUFFER_ADDR_MODE			0x70d
+#define DC_WIN_DV_CONTROL			0x70e
+
+#define DC_WIN_BLEND_NOKEY			0x70f
+#define DC_WIN_BLEND_1WIN			0x710
+#define DC_WIN_BLEND_2WIN_X			0x711
+#define DC_WIN_BLEND_2WIN_Y			0x712
+#define DC_WIN_BLEND32WIN_XY			0x713
+
+#define DC_WIN_HP_FETCH_CONTROL			0x714
+
+#define DC_WINBUF_START_ADDR			0x800
+#define DC_WINBUF_START_ADDR_NS			0x801
+#define DC_WINBUF_START_ADDR_U			0x802
+#define DC_WINBUF_START_ADDR_U_NS		0x803
+#define DC_WINBUF_START_ADDR_V			0x804
+#define DC_WINBUF_START_ADDR_V_NS		0x805
+
+#define DC_WINBUF_ADDR_H_OFFSET			0x806
+#define DC_WINBUF_ADDR_H_OFFSET_NS		0x807
+#define DC_WINBUF_ADDR_V_OFFSET			0x808
+#define DC_WINBUF_ADDR_V_OFFSET_NS		0x809
+
+#define DC_WINBUF_UFLOW_STATUS			0x80a
+
+#define DC_WINBUF_AD_UFLOW_STATUS		0xbca
+#define DC_WINBUF_BD_UFLOW_STATUS		0xdca
+#define DC_WINBUF_CD_UFLOW_STATUS		0xfca
+
+/* synchronization points */
+#define SYNCPT_VBLANK0 26
+#define SYNCPT_VBLANK1 27
+
+#endif /* TEGRA_DC_H */
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
new file mode 100644
index 000000000000..3a503c9e4686
--- /dev/null
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include <mach/clk.h>
+#include <linux/dma-mapping.h>
+#include <asm/dma-iommu.h>
+
+#include "drm.h"
+
+#define DRIVER_NAME "tegra"
+#define DRIVER_DESC "NVIDIA Tegra graphics"
+#define DRIVER_DATE "20120330"
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
+{
+	struct device *dev = drm->dev;
+	struct host1x *host1x;
+	int err;
+
+	host1x = dev_get_drvdata(dev);
+	drm->dev_private = host1x;
+	host1x->drm = drm;
+
+	drm_mode_config_init(drm);
+
+	err = host1x_drm_init(host1x, drm);
+	if (err < 0)
+		return err;
+
+	err = tegra_drm_fb_init(drm);
+	if (err < 0)
+		return err;
+
+	drm_kms_helper_poll_init(drm);
+
+	return 0;
+}
+
+static int tegra_drm_unload(struct drm_device *drm)
+{
+	drm_kms_helper_poll_fini(drm);
+	tegra_drm_fb_exit(drm);
+
+	drm_mode_config_cleanup(drm);
+
+	return 0;
+}
+
+static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
+{
+	return 0;
+}
+
+static void tegra_drm_lastclose(struct drm_device *drm)
+{
+	struct host1x *host1x = drm->dev_private;
+
+	drm_fbdev_cma_restore_mode(host1x->fbdev);
+}
+
+static struct drm_ioctl_desc tegra_drm_ioctls[] = {
+};
+
+static const struct file_operations tegra_drm_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = drm_gem_cma_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+	.read = drm_read,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
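+/*
+ * GEM object management is delegated to the CMA helpers, so every
+ * buffer, including the dumb buffers used for scanout, is backed by
+ * physically contiguous memory.
+ */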
+struct drm_driver tegra_drm_driver = {
+	.driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
+	.load = tegra_drm_load,
+	.unload = tegra_drm_unload,
+	.open = tegra_drm_open,
+	.lastclose = tegra_drm_lastclose,
+
+	.gem_free_object = drm_gem_cma_free_object,
+	.gem_vm_ops = &drm_gem_cma_vm_ops,
+	.dumb_create = drm_gem_cma_dumb_create,
+	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
+	.dumb_destroy = drm_gem_cma_dumb_destroy,
+
+	.ioctls = tegra_drm_ioctls,
+	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
+	.fops = &tegra_drm_fops,
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
new file mode 100644
index 000000000000..3a843a77ddc7
--- /dev/null
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_DRM_H
+#define TEGRA_DRM_H 1
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fixed.h>
+
+struct tegra_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_cma_object *obj;
+};
+
+static inline struct tegra_framebuffer *to_tegra_fb(struct drm_framebuffer *fb)
+{
+	return container_of(fb, struct tegra_framebuffer, base);
+}
+
+struct host1x {
+	struct drm_device *drm;
+	struct device *dev;
+	void __iomem *regs;
+	struct clk *clk;
+	int syncpt;
+	int irq;
+
+	struct mutex drm_clients_lock;
+	struct list_head drm_clients;
+	struct list_head drm_active;
+
+	struct mutex clients_lock;
+	struct list_head clients;
+
+	struct drm_fbdev_cma *fbdev;
+	struct tegra_framebuffer fb;
+};
+
+struct host1x_client;
+
+struct host1x_client_ops {
+	int (*drm_init)(struct host1x_client *client, struct drm_device *drm);
+	int (*drm_exit)(struct host1x_client *client);
+};
+
+struct host1x_client {
+	struct host1x *host1x;
+	struct device *dev;
+
+	const struct host1x_client_ops *ops;
+
+	struct list_head list;
+};
+
+extern int host1x_drm_init(struct host1x *host1x, struct drm_device *drm);
+extern int host1x_drm_exit(struct host1x *host1x);
+
+extern int host1x_register_client(struct host1x *host1x,
+				  struct host1x_client *client);
+extern int host1x_unregister_client(struct host1x *host1x,
+				    struct host1x_client *client);
+
+struct tegra_output;
+
+struct tegra_dc {
+	struct host1x_client client;
+
+	struct host1x *host1x;
+	struct device *dev;
+
+	struct drm_crtc base;
+	int pipe;
+
+	struct clk *clk;
+
+	void __iomem *regs;
+	int irq;
+
+	struct tegra_output *rgb;
+
+	struct list_head list;
+
+	struct drm_info_list *debugfs_files;
+	struct drm_minor *minor;
+	struct dentry *debugfs;
+};
+
+static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client)
+{
+	return container_of(client, struct tegra_dc, client);
+}
+
+static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
+{
+	return container_of(crtc, struct tegra_dc, base);
+}
+
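+/*
+ * Display controller registers are specified as 32-bit word offsets;
+ * the accessors below shift the offset left by two to address the
+ * corresponding byte offset in the register aperture.
+ */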
+static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
+				   unsigned long reg)
+{
+	writel(value, dc->regs + (reg << 2));
+}
+
+static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
+					   unsigned long reg)
+{
+	return readl(dc->regs + (reg << 2));
+}
+
+struct tegra_output_ops {
+	int (*enable)(struct tegra_output *output);
+	int (*disable)(struct tegra_output *output);
+	int (*setup_clock)(struct tegra_output *output, struct clk *clk,
+			   unsigned long pclk);
+	int (*check_mode)(struct tegra_output *output,
+			  struct drm_display_mode *mode,
+			  enum drm_mode_status *status);
+};
+
+enum tegra_output_type {
+	TEGRA_OUTPUT_RGB,
+	TEGRA_OUTPUT_HDMI,
+};
+
+struct tegra_output {
+	struct device_node *of_node;
+	struct device *dev;
+
+	const struct tegra_output_ops *ops;
+	enum tegra_output_type type;
+
+	struct i2c_adapter *ddc;
+	const struct edid *edid;
+	unsigned int hpd_irq;
+	int hpd_gpio;
+
+	struct drm_encoder encoder;
+	struct drm_connector connector;
+};
+
+static inline struct tegra_output *encoder_to_output(struct drm_encoder *e)
+{
+	return container_of(e, struct tegra_output, encoder);
+}
+
+static inline struct tegra_output *connector_to_output(struct drm_connector *c)
+{
+	return container_of(c, struct tegra_output, connector);
+}
+
+static inline int tegra_output_enable(struct tegra_output *output)
+{
+	if (output && output->ops && output->ops->enable)
+		return output->ops->enable(output);
+
+	return output ? -ENOSYS : -EINVAL;
+}
+
+static inline int tegra_output_disable(struct tegra_output *output)
+{
+	if (output && output->ops && output->ops->disable)
+		return output->ops->disable(output);
+
+	return output ? -ENOSYS : -EINVAL;
+}
+
+static inline int tegra_output_setup_clock(struct tegra_output *output,
+					   struct clk *clk, unsigned long pclk)
+{
+	if (output && output->ops && output->ops->setup_clock)
+		return output->ops->setup_clock(output, clk, pclk);
+
+	return output ? -ENOSYS : -EINVAL;
+}
+
+static inline int tegra_output_check_mode(struct tegra_output *output,
+					  struct drm_display_mode *mode,
+					  enum drm_mode_status *status)
+{
+	if (output && output->ops && output->ops->check_mode)
+		return output->ops->check_mode(output, mode, status);
+
+	return output ? -ENOSYS : -EINVAL;
+}
+
+/* from rgb.c */
+extern int tegra_dc_rgb_probe(struct tegra_dc *dc);
+extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
+extern int tegra_dc_rgb_exit(struct tegra_dc *dc);
+
+/* from output.c */
+extern int tegra_output_parse_dt(struct tegra_output *output);
+extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
+extern int tegra_output_exit(struct tegra_output *output);
+
+/* from gem.c */
+extern struct tegra_gem_object *tegra_gem_alloc(struct drm_device *drm,
+						size_t size);
+extern int tegra_gem_handle_create(struct drm_device *drm,
+				   struct drm_file *file, size_t size,
+				   unsigned long flags, uint32_t *handle);
+extern int tegra_gem_dumb_create(struct drm_file *file, struct drm_device *drm,
+				 struct drm_mode_create_dumb *args);
+extern int tegra_gem_dumb_map_offset(struct drm_file *file,
+				     struct drm_device *drm, uint32_t handle,
+				     uint64_t *offset);
+extern int tegra_gem_dumb_destroy(struct drm_file *file,
+				  struct drm_device *drm, uint32_t handle);
+extern int tegra_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+extern int tegra_gem_init_object(struct drm_gem_object *obj);
+extern void tegra_gem_free_object(struct drm_gem_object *obj);
+extern struct vm_operations_struct tegra_gem_vm_ops;
+
+/* from fb.c */
+extern int tegra_drm_fb_init(struct drm_device *drm);
+extern void tegra_drm_fb_exit(struct drm_device *drm);
+
+extern struct platform_driver tegra_host1x_driver;
+extern struct platform_driver tegra_hdmi_driver;
+extern struct platform_driver tegra_dc_driver;
+extern struct drm_driver tegra_drm_driver;
+
+#endif /* TEGRA_DRM_H */
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
new file mode 100644
index 000000000000..97993c6835fd
--- /dev/null
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "drm.h"
+
+static void tegra_drm_fb_output_poll_changed(struct drm_device *drm)
+{
+	struct host1x *host1x = drm->dev_private;
+
+	drm_fbdev_cma_hotplug_event(host1x->fbdev);
+}
+
+static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
+	.fb_create = drm_fb_cma_create,
+	.output_poll_changed = tegra_drm_fb_output_poll_changed,
+};
+
+int tegra_drm_fb_init(struct drm_device *drm)
+{
+	struct host1x *host1x = drm->dev_private;
+	struct drm_fbdev_cma *fbdev;
+
+	drm->mode_config.min_width = 0;
+	drm->mode_config.min_height = 0;
+
+	drm->mode_config.max_width = 4096;
+	drm->mode_config.max_height = 4096;
+
+	drm->mode_config.funcs = &tegra_drm_mode_funcs;
+
+	fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+				   drm->mode_config.num_connector);
+	if (IS_ERR(fbdev))
+		return PTR_ERR(fbdev);
+
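+	/*
+	 * Without a framebuffer console attached, nothing would set up an
+	 * initial mode, so force one here to get output on boot.
+	 */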
+#ifndef CONFIG_FRAMEBUFFER_CONSOLE
+	drm_fbdev_cma_restore_mode(fbdev);
+#endif
+
+	host1x->fbdev = fbdev;
+
+	return 0;
+}
+
+void tegra_drm_fb_exit(struct drm_device *drm)
+{
+	struct host1x *host1x = drm->dev_private;
+
+	drm_fbdev_cma_fini(host1x->fbdev);
+}
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
new file mode 100644
index 000000000000..ab4016412bbf
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -0,0 +1,1355 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <mach/clk.h>
+
+#include "hdmi.h"
+#include "drm.h"
+#include "dc.h"
+
+struct tegra_hdmi {
+	struct host1x_client client;
+	struct tegra_output output;
+	struct device *dev;
+
+	struct regulator *vdd;
+	struct regulator *pll;
+
+	void __iomem *regs;
+	unsigned int irq;
+
+	struct clk *clk_parent;
+	struct clk *clk;
+
+	unsigned int audio_source;
+	unsigned int audio_freq;
+	bool stereo;
+	bool dvi;
+
+	struct drm_info_list *debugfs_files;
+	struct drm_minor *minor;
+	struct dentry *debugfs;
+};
+
+static inline struct tegra_hdmi *
+host1x_client_to_hdmi(struct host1x_client *client)
+{
+	return container_of(client, struct tegra_hdmi, client);
+}
+
+static inline struct tegra_hdmi *to_hdmi(struct tegra_output *output)
+{
+	return container_of(output, struct tegra_hdmi, output);
+}
+
+#define HDMI_AUDIOCLK_FREQ 216000000
+#define HDMI_REKEY_DEFAULT 56
+
+enum {
+	AUTO = 0,
+	SPDIF,
+	HDA,
+};
+
+static inline unsigned long tegra_hdmi_readl(struct tegra_hdmi *hdmi,
+					     unsigned long reg)
+{
+	return readl(hdmi->regs + (reg << 2));
+}
+
+static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, unsigned long val,
+				     unsigned long reg)
+{
+	writel(val, hdmi->regs + (reg << 2));
+}
+
+struct tegra_hdmi_audio_config {
+	unsigned int pclk;
+	unsigned int n;
+	unsigned int cts;
+	unsigned int aval;
+};
+
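+/*
+ * The tables below follow the HDMI audio clock regeneration relation
+ * 128 * fs = pclk * N / CTS, i.e. CTS = (pclk * N) / (128 * fs): for
+ * example, at a 25.2 MHz pixel clock with 32 kHz audio,
+ * 25200000 * 4096 / (128 * 32000) = 25200, matching the first entry.
+ */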
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_32k[] = {
+	{  25200000, 4096,  25200, 24000 },
+	{  27000000, 4096,  27000, 24000 },
+	{  74250000, 4096,  74250, 24000 },
+	{ 148500000, 4096, 148500, 24000 },
+	{         0,    0,      0,     0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_44_1k[] = {
+	{  25200000, 5880,  26250, 25000 },
+	{  27000000, 5880,  28125, 25000 },
+	{  74250000, 4704,  61875, 20000 },
+	{ 148500000, 4704, 123750, 20000 },
+	{         0,    0,      0,     0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_48k[] = {
+	{  25200000, 6144,  25200, 24000 },
+	{  27000000, 6144,  27000, 24000 },
+	{  74250000, 6144,  74250, 24000 },
+	{ 148500000, 6144, 148500, 24000 },
+	{         0,    0,      0,     0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_88_2k[] = {
+	{  25200000, 11760,  26250, 25000 },
+	{  27000000, 11760,  28125, 25000 },
+	{  74250000,  9408,  61875, 20000 },
+	{ 148500000,  9408, 123750, 20000 },
+	{         0,     0,      0,     0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_96k[] = {
+	{  25200000, 12288,  25200, 24000 },
+	{  27000000, 12288,  27000, 24000 },
+	{  74250000, 12288,  74250, 24000 },
+	{ 148500000, 12288, 148500, 24000 },
+	{         0,     0,      0,     0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_176_4k[] = {
+	{  25200000, 23520,  26250, 25000 },
+	{  27000000, 23520,  28125, 25000 },
+	{  74250000, 18816,  61875, 20000 },
+	{ 148500000, 18816, 123750, 20000 },
+	{         0,     0,      0,     0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = {
+	{  25200000, 24576,  25200, 24000 },
+	{  27000000, 24576,  27000, 24000 },
+	{  74250000, 24576,  74250, 24000 },
+	{ 148500000, 24576, 148500, 24000 },
+	{         0,     0,      0,     0 },
+};
+
+struct tmds_config {
+	unsigned int pclk;
+	u32 pll0;
+	u32 pll1;
+	u32 pe_current;
+	u32 drive_current;
+};
+
+static const struct tmds_config tegra2_tmds_config[] = {
+	{ /* 480p modes */
+		.pclk = 27000000,
+		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+			SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
+			SOR_PLL_TX_REG_LOAD(3),
+		.pll1 = SOR_PLL_TMDS_TERM_ENABLE,
+		.pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
+			PE_CURRENT1(PE_CURRENT_0_0_mA) |
+			PE_CURRENT2(PE_CURRENT_0_0_mA) |
+			PE_CURRENT3(PE_CURRENT_0_0_mA),
+		.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+			DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+			DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+			DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+	}, { /* 720p modes */
+		.pclk = 74250000,
+		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+			SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
+			SOR_PLL_TX_REG_LOAD(3),
+		.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+		.pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
+			PE_CURRENT1(PE_CURRENT_6_0_mA) |
+			PE_CURRENT2(PE_CURRENT_6_0_mA) |
+			PE_CURRENT3(PE_CURRENT_6_0_mA),
+		.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+			DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+			DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+			DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+	}, { /* 1080p modes */
+		.pclk = UINT_MAX,
+		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+			SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
+			SOR_PLL_TX_REG_LOAD(3),
+		.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+		.pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
+			PE_CURRENT1(PE_CURRENT_6_0_mA) |
+			PE_CURRENT2(PE_CURRENT_6_0_mA) |
+			PE_CURRENT3(PE_CURRENT_6_0_mA),
+		.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+			DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+			DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+			DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+	},
+};
+
+static const struct tmds_config tegra3_tmds_config[] = {
+	{ /* 480p modes */
+		.pclk = 27000000,
+		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+			SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
+			SOR_PLL_TX_REG_LOAD(0),
+		.pll1 = SOR_PLL_TMDS_TERM_ENABLE,
+		.pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
+			PE_CURRENT1(PE_CURRENT_0_0_mA) |
+			PE_CURRENT2(PE_CURRENT_0_0_mA) |
+			PE_CURRENT3(PE_CURRENT_0_0_mA),
+		.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+			DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+			DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+			DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+	}, { /* 720p modes */
+		.pclk = 74250000,
+		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+			SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
+			SOR_PLL_TX_REG_LOAD(0),
+		.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+		.pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
+			PE_CURRENT1(PE_CURRENT_5_0_mA) |
+			PE_CURRENT2(PE_CURRENT_5_0_mA) |
+			PE_CURRENT3(PE_CURRENT_5_0_mA),
+		.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+			DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+			DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+			DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+	}, { /* 1080p modes */
+		.pclk = UINT_MAX,
+		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+			SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(3) |
+			SOR_PLL_TX_REG_LOAD(0),
+		.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+		.pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
+			PE_CURRENT1(PE_CURRENT_5_0_mA) |
+			PE_CURRENT2(PE_CURRENT_5_0_mA) |
+			PE_CURRENT3(PE_CURRENT_5_0_mA),
+		.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+			DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+			DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+			DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+	},
+};
+
+static const struct tegra_hdmi_audio_config *
+tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
+{
+	const struct tegra_hdmi_audio_config *table;
+
+	switch (audio_freq) {
+	case 32000:
+		table = tegra_hdmi_audio_32k;
+		break;
+
+	case 44100:
+		table = tegra_hdmi_audio_44_1k;
+		break;
+
+	case 48000:
+		table = tegra_hdmi_audio_48k;
+		break;
+
+	case 88200:
+		table = tegra_hdmi_audio_88_2k;
+		break;
+
+	case 96000:
+		table = tegra_hdmi_audio_96k;
+		break;
+
+	case 176400:
+		table = tegra_hdmi_audio_176_4k;
+		break;
+
+	case 192000:
+		table = tegra_hdmi_audio_192k;
+		break;
+
+	default:
+		return NULL;
+	}
+
+	while (table->pclk) {
+		if (table->pclk == pclk)
+			return table;
+
+		table++;
+	}
+
+	return NULL;
+}
+
+static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
+{
+	const unsigned int freqs[] = {
+		32000, 44100, 48000, 88200, 96000, 176400, 192000
+	};
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
+		unsigned int f = freqs[i];
+		unsigned int eight_half;
+		unsigned long value;
+		unsigned int delta;
+
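+		/*
+		 * Program a tolerance window around the nominal sample rate
+		 * measurement; higher rates get a tighter window.
+		 */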
+		if (f > 96000)
+			delta = 2;
+		else if (f > 48000)
+			delta = 6;
+		else
+			delta = 9;
+
+		eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128);
+		value = AUDIO_FS_LOW(eight_half - delta) |
+			AUDIO_FS_HIGH(eight_half + delta);
+		tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_FS(i));
+	}
+}
+
+static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
+{
+	struct device_node *node = hdmi->dev->of_node;
+	const struct tegra_hdmi_audio_config *config;
+	unsigned int offset = 0;
+	unsigned long value;
+
+	switch (hdmi->audio_source) {
+	case HDA:
+		value = AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
+		break;
+
+	case SPDIF:
+		value = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+		break;
+
+	default:
+		value = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+		break;
+	}
+
+	if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
+		value |= AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
+			 AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
+		tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+	} else {
+		value |= AUDIO_CNTRL0_INJECT_NULLSMPL;
+		tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+
+		value = AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
+			AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
+		tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+	}
+
+	config = tegra_hdmi_get_audio_config(hdmi->audio_freq, pclk);
+	if (!config) {
+		dev_err(hdmi->dev, "cannot set audio to %u at %u pclk\n",
+			hdmi->audio_freq, pclk);
+		return -EINVAL;
+	}
+
+	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL);
+
+	value = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNATE |
+		AUDIO_N_VALUE(config->n - 1);
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
+
+	tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
+			  HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+
+	value = ACR_SUBPACK_CTS(config->cts);
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+
+	value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1);
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE);
+
+	value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_AUDIO_N);
+	value &= ~AUDIO_N_RESETF;
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
+
+	if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
+		switch (hdmi->audio_freq) {
+		case 32000:
+			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320;
+			break;
+
+		case 44100:
+			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441;
+			break;
+
+		case 48000:
+			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480;
+			break;
+
+		case 88200:
+			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882;
+			break;
+
+		case 96000:
+			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960;
+			break;
+
+		case 176400:
+			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764;
+			break;
+
+		case 192000:
+			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920;
+			break;
+		}
+
+		tegra_hdmi_writel(hdmi, config->aval, offset);
+	}
+
+	tegra_hdmi_setup_audio_fs_tables(hdmi);
+
+	return 0;
+}
+
+static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi,
+				      unsigned int offset, u8 type,
+				      u8 version, void *data, size_t size)
+{
+	unsigned long value;
+	u8 *ptr = data;
+	u32 subpack[2];
+	size_t i;
+	u8 csum;
+
+	/*
+	 * The first byte of the payload carries the checksum, chosen so
+	 * that the header and payload bytes sum to zero modulo 256.
+	 */
+	csum = type + version + size - 1;
+
+	for (i = 1; i < size; i++)
+		csum += ptr[i];
+
+	ptr[0] = 0x100 - csum;
+
+	value = INFOFRAME_HEADER_TYPE(type) |
+		INFOFRAME_HEADER_VERSION(version) |
+		INFOFRAME_HEADER_LEN(size - 1);
+	tegra_hdmi_writel(hdmi, value, offset);
+
+	/* The audio infoframe only has one set of subpack registers. The HDMI
+	 * block pads the rest of the data as per the spec, so we have to fix
+	 * up the length before filling in the subpacks.
+	 */
+	if (offset == HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER)
+		size = 6;
+
+	/* Each subpack is 7 bytes, divided into:
+	 *   subpack_low  - bytes 0 - 3
+	 *   subpack_high - bytes 4 - 6 (with byte 7 padded to 0x00)
+	 */
+	for (i = 0; i < size; i++) {
+		size_t index = i % 7;
+
+		if (index == 0)
+			memset(subpack, 0x0, sizeof(subpack));
+
+		((u8 *)subpack)[index] = ptr[i];
+
+		if (index == 6 || (i + 1 == size)) {
+			unsigned int reg = offset + 1 + (i / 7) * 2;
+
+			tegra_hdmi_writel(hdmi, subpack[0], reg);
+			tegra_hdmi_writel(hdmi, subpack[1], reg + 1);
+		}
+	}
+}
+
+static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
+					   struct drm_display_mode *mode)
+{
+	struct hdmi_avi_infoframe frame;
+	unsigned int h_front_porch;
+	unsigned int hsize = 16;
+	unsigned int vsize = 9;
+
+	if (hdmi->dvi) {
+		tegra_hdmi_writel(hdmi, 0,
+				  HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+		return;
+	}
+
+	h_front_porch = mode->hsync_start - mode->hdisplay;
+	memset(&frame, 0, sizeof(frame));
+	frame.r = HDMI_AVI_R_SAME;
+
+	switch (mode->vdisplay) {
+	case 480:
+		if (mode->hdisplay == 640) {
+			frame.m = HDMI_AVI_M_4_3;
+			frame.vic = 1;
+		} else {
+			frame.m = HDMI_AVI_M_16_9;
+			frame.vic = 3;
+		}
+		break;
+
+	case 576:
+		if (((hsize * 10) / vsize) > 14) {
+			frame.m = HDMI_AVI_M_16_9;
+			frame.vic = 18;
+		} else {
+			frame.m = HDMI_AVI_M_4_3;
+			frame.vic = 17;
+		}
+		break;
+
+	case 720:
+	case 1470: /* stereo mode */
+		frame.m = HDMI_AVI_M_16_9;
+
+		if (h_front_porch == 110)
+			frame.vic = 4;
+		else
+			frame.vic = 19;
+		break;
+
+	case 1080:
+	case 2205: /* stereo mode */
+		frame.m = HDMI_AVI_M_16_9;
+
+		switch (h_front_porch) {
+		case 88:
+			frame.vic = 16;
+			break;
+
+		case 528:
+			frame.vic = 31;
+			break;
+
+		default:
+			frame.vic = 32;
+			break;
+		}
+		break;
+
+	default:
+		frame.m = HDMI_AVI_M_16_9;
+		frame.vic = 0;
+		break;
+	}
+
+	tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER,
+				  HDMI_INFOFRAME_TYPE_AVI, HDMI_AVI_VERSION,
+				  &frame, sizeof(frame));
+
+	tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+			  HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
+{
+	struct hdmi_audio_infoframe frame;
+
+	if (hdmi->dvi) {
+		tegra_hdmi_writel(hdmi, 0,
+				  HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+		return;
+	}
+
+	memset(&frame, 0, sizeof(frame));
+	frame.cc = HDMI_AUDIO_CC_2;
+
+	tegra_hdmi_write_infopack(hdmi,
+				  HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER,
+				  HDMI_INFOFRAME_TYPE_AUDIO,
+				  HDMI_AUDIO_VERSION,
+				  &frame, sizeof(frame));
+
+	tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+			  HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
+{
+	struct hdmi_stereo_infoframe frame;
+	unsigned long value;
+
+	if (!hdmi->stereo) {
+		value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+		value &= ~GENERIC_CTRL_ENABLE;
+		tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+		return;
+	}
+
+	memset(&frame, 0, sizeof(frame));
+	frame.regid0 = 0x03;
+	frame.regid1 = 0x0c;
+	frame.regid2 = 0x00;
+	frame.hdmi_video_format = 2;
+
+	/* TODO: 74 MHz limit? */
+	if (1) {
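+		/* frame packing */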
+		frame._3d_structure = 0;
+	} else {
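+		/* side-by-side (half) */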
+		frame._3d_structure = 8;
+		frame._3d_ext_data = 0;
+	}
+
+	tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_HEADER,
+				  HDMI_INFOFRAME_TYPE_VENDOR,
+				  HDMI_VENDOR_VERSION, &frame, 6);
+
+	value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+	value |= GENERIC_CTRL_ENABLE;
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
+
+static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi,
+				  const struct tmds_config *tmds)
+{
+	unsigned long value;
+
+	tegra_hdmi_writel(hdmi, tmds->pll0, HDMI_NV_PDISP_SOR_PLL0);
+	tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1);
+	tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT);
+
+	value = tmds->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE;
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+}
+
+static int tegra_output_hdmi_enable(struct tegra_output *output)
+{
+	unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
+	struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+	struct drm_display_mode *mode = &dc->base.mode;
+	struct tegra_hdmi *hdmi = to_hdmi(output);
+	struct device_node *node = hdmi->dev->of_node;
+	unsigned int pulse_start, div82, pclk;
+	const struct tmds_config *tmds;
+	unsigned int num_tmds;
+	unsigned long value;
+	int retries = 1000;
+	int err;
+
+	pclk = mode->clock * 1000;
+	h_sync_width = mode->hsync_end - mode->hsync_start;
+	h_front_porch = mode->hsync_start - mode->hdisplay;
+	h_back_porch = mode->htotal - mode->hsync_end;
+
+	err = regulator_enable(hdmi->vdd);
+	if (err < 0) {
+		dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
+		return err;
+	}
+
+	err = regulator_enable(hdmi->pll);
+	if (err < 0) {
+		dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
+		return err;
+	}
+
+	/*
+	 * This assumes that the display controller will divide its parent
+	 * clock by 2 to generate the pixel clock.
+	 */
+	err = tegra_output_setup_clock(output, hdmi->clk, pclk * 2);
+	if (err < 0) {
+		dev_err(hdmi->dev, "failed to setup clock: %d\n", err);
+		return err;
+	}
+
+	err = clk_set_rate(hdmi->clk, pclk);
+	if (err < 0)
+		return err;
+
+	err = clk_enable(hdmi->clk);
+	if (err < 0) {
+		dev_err(hdmi->dev, "failed to enable clock: %d\n", err);
+		return err;
+	}
+
+	tegra_periph_reset_assert(hdmi->clk);
+	usleep_range(1000, 2000);
+	tegra_periph_reset_deassert(hdmi->clk);
+
+	tegra_dc_writel(dc, VSYNC_H_POSITION(1),
+			DC_DISP_DISP_TIMING_OPTIONS);
+	tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
+			DC_DISP_DISP_COLOR_CONTROL);
+
+	/* video_preamble uses h_pulse2 */
+	pulse_start = 1 + h_sync_width + h_back_porch - 10;
+
+	tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
+
+	value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE |
+		PULSE_LAST_END_A;
+	tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
+
+	value = PULSE_START(pulse_start) | PULSE_END(pulse_start + 8);
+	tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
+
+	value = VSYNC_WINDOW_END(0x210) | VSYNC_WINDOW_START(0x200) |
+		VSYNC_WINDOW_ENABLE;
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+
+	if (dc->pipe)
+		value = HDMI_SRC_DISPLAYB;
+	else
+		value = HDMI_SRC_DISPLAYA;
+
+	if ((mode->hdisplay == 720) && ((mode->vdisplay == 480) ||
+					(mode->vdisplay == 576)))
+		tegra_hdmi_writel(hdmi,
+				  value | ARM_VIDEO_RANGE_FULL,
+				  HDMI_NV_PDISP_INPUT_CONTROL);
+	else
+		tegra_hdmi_writel(hdmi,
+				  value | ARM_VIDEO_RANGE_LIMITED,
+				  HDMI_NV_PDISP_INPUT_CONTROL);
+
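+	/*
+	 * div82 holds the reference clock rate in quarter-MHz units; the
+	 * upper bits program the integer part of the divider in MHz and
+	 * the two lowest bits the fractional part.
+	 */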
+	div82 = clk_get_rate(hdmi->clk) / 1000000 * 4;
+	value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82);
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK);
+
+	if (!hdmi->dvi) {
+		err = tegra_hdmi_setup_audio(hdmi, pclk);
+		if (err < 0)
+			hdmi->dvi = true;
+	}
+
+	if (of_device_is_compatible(node, "nvidia,tegra20-hdmi")) {
+		/*
+		 * TODO: add ELD support
+		 */
+	}
+
+	rekey = HDMI_REKEY_DEFAULT;
+	value = HDMI_CTRL_REKEY(rekey);
+	value |= HDMI_CTRL_MAX_AC_PACKET((h_sync_width + h_back_porch +
+					  h_front_porch - rekey - 18) / 32);
+
+	if (!hdmi->dvi)
+		value |= HDMI_CTRL_ENABLE;
+
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL);
+
+	if (hdmi->dvi)
+		tegra_hdmi_writel(hdmi, 0x0,
+				  HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+	else
+		tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO,
+				  HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+
+	tegra_hdmi_setup_avi_infoframe(hdmi, mode);
+	tegra_hdmi_setup_audio_infoframe(hdmi);
+	tegra_hdmi_setup_stereo_infoframe(hdmi);
+
+	/* TMDS CONFIG */
+	if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
+		num_tmds = ARRAY_SIZE(tegra3_tmds_config);
+		tmds = tegra3_tmds_config;
+	} else {
+		num_tmds = ARRAY_SIZE(tegra2_tmds_config);
+		tmds = tegra2_tmds_config;
+	}
+
+	for (i = 0; i < num_tmds; i++) {
+		if (pclk <= tmds[i].pclk) {
+			tegra_hdmi_setup_tmds(hdmi, &tmds[i]);
+			break;
+		}
+	}
+
+	tegra_hdmi_writel(hdmi,
+			  SOR_SEQ_CTL_PU_PC(0) |
+			  SOR_SEQ_PU_PC_ALT(0) |
+			  SOR_SEQ_PD_PC(8) |
+			  SOR_SEQ_PD_PC_ALT(8),
+			  HDMI_NV_PDISP_SOR_SEQ_CTL);
+
+	value = SOR_SEQ_INST_WAIT_TIME(1) |
+		SOR_SEQ_INST_WAIT_UNITS_VSYNC |
+		SOR_SEQ_INST_HALT |
+		SOR_SEQ_INST_PIN_A_LOW |
+		SOR_SEQ_INST_PIN_B_LOW |
+		SOR_SEQ_INST_DRIVE_PWM_OUT_LO;
+
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(0));
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(8));
+
+	value = 0x1c800;
+	value &= ~SOR_CSTM_ROTCLK(~0);
+	value |= SOR_CSTM_ROTCLK(2);
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM);
+
+	tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+	/* start SOR */
+	tegra_hdmi_writel(hdmi,
+			  SOR_PWR_NORMAL_STATE_PU |
+			  SOR_PWR_NORMAL_START_NORMAL |
+			  SOR_PWR_SAFE_STATE_PD |
+			  SOR_PWR_SETTING_NEW_TRIGGER,
+			  HDMI_NV_PDISP_SOR_PWR);
+	tegra_hdmi_writel(hdmi,
+			  SOR_PWR_NORMAL_STATE_PU |
+			  SOR_PWR_NORMAL_START_NORMAL |
+			  SOR_PWR_SAFE_STATE_PD |
+			  SOR_PWR_SETTING_NEW_DONE,
+			  HDMI_NV_PDISP_SOR_PWR);
+
+	do {
+		BUG_ON(--retries < 0);
+		value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR);
+	} while (value & SOR_PWR_SETTING_NEW_PENDING);
+
+	value = SOR_STATE_ASY_CRCMODE_COMPLETE |
+		SOR_STATE_ASY_OWNER_HEAD0 |
+		SOR_STATE_ASY_SUBOWNER_BOTH |
+		SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A |
+		SOR_STATE_ASY_DEPOL_POS;
+
+	/* setup sync polarities */
+	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+		value |= SOR_STATE_ASY_HSYNCPOL_POS;
+
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		value |= SOR_STATE_ASY_HSYNCPOL_NEG;
+
+	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+		value |= SOR_STATE_ASY_VSYNCPOL_POS;
+
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		value |= SOR_STATE_ASY_VSYNCPOL_NEG;
+
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE2);
+
+	value = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL;
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE1);
+
+	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+	tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0);
+	tegra_hdmi_writel(hdmi, value | SOR_STATE_ATTACHED,
+			  HDMI_NV_PDISP_SOR_STATE1);
+	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+
+	tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);
+
+	value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+		PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+	value = DISP_CTRL_MODE_C_DISPLAY;
+	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+	/* TODO: add HDCP support */
+
+	return 0;
+}
+
+static int tegra_output_hdmi_disable(struct tegra_output *output)
+{
+	struct tegra_hdmi *hdmi = to_hdmi(output);
+
+	tegra_periph_reset_assert(hdmi->clk);
+	clk_disable(hdmi->clk);
+	regulator_disable(hdmi->pll);
+	regulator_disable(hdmi->vdd);
+
+	return 0;
+}
+
+static int tegra_output_hdmi_setup_clock(struct tegra_output *output,
+					 struct clk *clk, unsigned long pclk)
+{
+	struct tegra_hdmi *hdmi = to_hdmi(output);
+	struct clk *base;
+	int err;
+
+	err = clk_set_parent(clk, hdmi->clk_parent);
+	if (err < 0) {
+		dev_err(output->dev, "failed to set parent: %d\n", err);
+		return err;
+	}
+
+	base = clk_get_parent(hdmi->clk_parent);
+
+	/*
+	 * This assumes that the parent clock is pll_d_out0 or pll_d2_out
+	 * respectively, each of which divides the base pll_d by 2.
+	 */
+	err = clk_set_rate(base, pclk * 2);
+	if (err < 0)
+		dev_err(output->dev,
+			"failed to set base clock rate to %lu Hz\n",
+			pclk * 2);
+
+	return 0;
+}
+
+static int tegra_output_hdmi_check_mode(struct tegra_output *output,
+					struct drm_display_mode *mode,
+					enum drm_mode_status *status)
+{
+	struct tegra_hdmi *hdmi = to_hdmi(output);
+	unsigned long pclk = mode->clock * 1000;
+	struct clk *parent;
+	long err;
+
+	parent = clk_get_parent(hdmi->clk_parent);
+
+	err = clk_round_rate(parent, pclk * 4);
+	if (err < 0)
+		*status = MODE_NOCLOCK;
+	else
+		*status = MODE_OK;
+
+	return 0;
+}
+
+static const struct tegra_output_ops hdmi_ops = {
+	.enable = tegra_output_hdmi_enable,
+	.disable = tegra_output_hdmi_disable,
+	.setup_clock = tegra_output_hdmi_setup_clock,
+	.check_mode = tegra_output_hdmi_check_mode,
+};
+
+static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct tegra_hdmi *hdmi = node->info_ent->data;
+
+#define DUMP_REG(name)						\
+	seq_printf(s, "%-56s %#05x %08lx\n", #name, name,	\
+		tegra_hdmi_readl(hdmi, name))
+
+	DUMP_REG(HDMI_CTXSW);
+	DUMP_REG(HDMI_NV_PDISP_SOR_STATE0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_STATE1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_STATE2);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CMODE);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_RI);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_LSB);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU0);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU1);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU2);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_STATUS);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_HEADER);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_STATUS);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_SUBPACK);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_EMU0);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1_RDATA);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_SPARE);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_SOR_CAP);
+	DUMP_REG(HDMI_NV_PDISP_SOR_PWR);
+	DUMP_REG(HDMI_NV_PDISP_SOR_TEST);
+	DUMP_REG(HDMI_NV_PDISP_SOR_PLL0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_PLL1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_PLL2);
+	DUMP_REG(HDMI_NV_PDISP_SOR_CSTM);
+	DUMP_REG(HDMI_NV_PDISP_SOR_LVDS);
+	DUMP_REG(HDMI_NV_PDISP_SOR_CRCA);
+	DUMP_REG(HDMI_NV_PDISP_SOR_CRCB);
+	DUMP_REG(HDMI_NV_PDISP_SOR_BLANK);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_CTL);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(0));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(1));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(2));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(3));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(4));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(5));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(6));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(7));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(8));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(9));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(10));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(11));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(12));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(13));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(14));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(15));
+	DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_TRIG);
+	DUMP_REG(HDMI_NV_PDISP_SOR_MSCHECK);
+	DUMP_REG(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG0);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG1);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG2);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(0));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(1));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(2));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(3));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(4));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(5));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(6));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_THRESHOLD);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_CNTRL0);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_N);
+	DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING);
+	DUMP_REG(HDMI_NV_PDISP_SOR_REFCLK);
+	DUMP_REG(HDMI_NV_PDISP_CRC_CONTROL);
+	DUMP_REG(HDMI_NV_PDISP_INPUT_CONTROL);
+	DUMP_REG(HDMI_NV_PDISP_SCRATCH);
+	DUMP_REG(HDMI_NV_PDISP_PE_CURRENT);
+	DUMP_REG(HDMI_NV_PDISP_KEY_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG0);
+	DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG1);
+	DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG2);
+	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_0);
+	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_1);
+	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_2);
+	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_3);
+	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
+	DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
+	DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
+	DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+
+#undef DUMP_REG
+
+	return 0;
+}
+
+static struct drm_info_list debugfs_files[] = {
+	{ "regs", tegra_hdmi_show_regs, 0, NULL },
+};
+
+static int tegra_hdmi_debugfs_init(struct tegra_hdmi *hdmi,
+				   struct drm_minor *minor)
+{
+	unsigned int i;
+	int err;
+
+	hdmi->debugfs = debugfs_create_dir("hdmi", minor->debugfs_root);
+	if (!hdmi->debugfs)
+		return -ENOMEM;
+
+	hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+				      GFP_KERNEL);
+	if (!hdmi->debugfs_files) {
+		err = -ENOMEM;
+		goto remove;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+		hdmi->debugfs_files[i].data = hdmi;
+
+	err = drm_debugfs_create_files(hdmi->debugfs_files,
+				       ARRAY_SIZE(debugfs_files),
+				       hdmi->debugfs, minor);
+	if (err < 0)
+		goto free;
+
+	hdmi->minor = minor;
+
+	return 0;
+
+free:
+	kfree(hdmi->debugfs_files);
+	hdmi->debugfs_files = NULL;
+remove:
+	debugfs_remove(hdmi->debugfs);
+	hdmi->debugfs = NULL;
+
+	return err;
+}
+
+static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
+{
+	drm_debugfs_remove_files(hdmi->debugfs_files, ARRAY_SIZE(debugfs_files),
+				 hdmi->minor);
+	hdmi->minor = NULL;
+
+	kfree(hdmi->debugfs_files);
+	hdmi->debugfs_files = NULL;
+
+	debugfs_remove(hdmi->debugfs);
+	hdmi->debugfs = NULL;
+
+	return 0;
+}
+
+static int tegra_hdmi_drm_init(struct host1x_client *client,
+			       struct drm_device *drm)
+{
+	struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+	int err;
+
+	hdmi->output.type = TEGRA_OUTPUT_HDMI;
+	hdmi->output.dev = client->dev;
+	hdmi->output.ops = &hdmi_ops;
+
+	err = tegra_output_init(drm, &hdmi->output);
+	if (err < 0) {
+		dev_err(client->dev, "output setup failed: %d\n", err);
+		return err;
+	}
+
+	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+		err = tegra_hdmi_debugfs_init(hdmi, drm->primary);
+		if (err < 0)
+			dev_err(client->dev, "debugfs setup failed: %d\n", err);
+	}
+
+	return 0;
+}
+
+static int tegra_hdmi_drm_exit(struct host1x_client *client)
+{
+	struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+	int err;
+
+	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+		err = tegra_hdmi_debugfs_exit(hdmi);
+		if (err < 0)
+			dev_err(client->dev, "debugfs cleanup failed: %d\n",
+				err);
+	}
+
+	err = tegra_output_disable(&hdmi->output);
+	if (err < 0) {
+		dev_err(client->dev, "output failed to disable: %d\n", err);
+		return err;
+	}
+
+	err = tegra_output_exit(&hdmi->output);
+	if (err < 0) {
+		dev_err(client->dev, "output cleanup failed: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static const struct host1x_client_ops hdmi_client_ops = {
+	.drm_init = tegra_hdmi_drm_init,
+	.drm_exit = tegra_hdmi_drm_exit,
+};
+
+static int tegra_hdmi_probe(struct platform_device *pdev)
+{
+	struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+	struct tegra_hdmi *hdmi;
+	struct resource *regs;
+	int err;
+
+	hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+	if (!hdmi)
+		return -ENOMEM;
+
+	hdmi->dev = &pdev->dev;
+	hdmi->audio_source = AUTO;
+	hdmi->audio_freq = 44100;
+	hdmi->stereo = false;
+	hdmi->dvi = false;
+
+	hdmi->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(hdmi->clk)) {
+		dev_err(&pdev->dev, "failed to get clock\n");
+		return PTR_ERR(hdmi->clk);
+	}
+
+	err = clk_prepare(hdmi->clk);
+	if (err < 0)
+		return err;
+
+	hdmi->clk_parent = devm_clk_get(&pdev->dev, "parent");
+	if (IS_ERR(hdmi->clk_parent))
+		return PTR_ERR(hdmi->clk_parent);
+
+	err = clk_prepare(hdmi->clk_parent);
+	if (err < 0)
+		return err;
+
+	err = clk_set_parent(hdmi->clk, hdmi->clk_parent);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to setup clocks: %d\n", err);
+		return err;
+	}
+
+	hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR(hdmi->vdd)) {
+		dev_err(&pdev->dev, "failed to get VDD regulator\n");
+		return PTR_ERR(hdmi->vdd);
+	}
+
+	hdmi->pll = devm_regulator_get(&pdev->dev, "pll");
+	if (IS_ERR(hdmi->pll)) {
+		dev_err(&pdev->dev, "failed to get PLL regulator\n");
+		return PTR_ERR(hdmi->pll);
+	}
+
+	hdmi->output.dev = &pdev->dev;
+
+	err = tegra_output_parse_dt(&hdmi->output);
+	if (err < 0)
+		return err;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs)
+		return -ENXIO;
+
+	hdmi->regs = devm_request_and_ioremap(&pdev->dev, regs);
+	if (!hdmi->regs)
+		return -EADDRNOTAVAIL;
+
+	err = platform_get_irq(pdev, 0);
+	if (err < 0)
+		return err;
+
+	hdmi->irq = err;
+
+	hdmi->client.ops = &hdmi_client_ops;
+	INIT_LIST_HEAD(&hdmi->client.list);
+	hdmi->client.dev = &pdev->dev;
+
+	err = host1x_register_client(host1x, &hdmi->client);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+			err);
+		return err;
+	}
+
+	platform_set_drvdata(pdev, hdmi);
+
+	return 0;
+}
+
+static int tegra_hdmi_remove(struct platform_device *pdev)
+{
+	struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+	struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
+	int err;
+
+	err = host1x_unregister_client(host1x, &hdmi->client);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+			err);
+		return err;
+	}
+
+	clk_unprepare(hdmi->clk_parent);
+	clk_unprepare(hdmi->clk);
+
+	return 0;
+}
+
+static struct of_device_id tegra_hdmi_of_match[] = {
+	{ .compatible = "nvidia,tegra30-hdmi", },
+	{ .compatible = "nvidia,tegra20-hdmi", },
+	{ },
+};
+
+struct platform_driver tegra_hdmi_driver = {
+	.driver = {
+		.name = "tegra-hdmi",
+		.owner = THIS_MODULE,
+		.of_match_table = tegra_hdmi_of_match,
+	},
+	.probe = tegra_hdmi_probe,
+	.remove = tegra_hdmi_remove,
+};
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
new file mode 100644
index 000000000000..1477f36eb45a
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -0,0 +1,575 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_HDMI_H
+#define TEGRA_HDMI_H 1
+
+#define HDMI_INFOFRAME_TYPE_VENDOR   0x81
+#define HDMI_INFOFRAME_TYPE_AVI      0x82
+#define HDMI_INFOFRAME_TYPE_SPD      0x83
+#define HDMI_INFOFRAME_TYPE_AUDIO    0x84
+#define HDMI_INFOFRAME_TYPE_MPEG_SRC 0x85
+#define HDMI_INFOFRAME_TYPE_NTSC_VBI 0x86
+
+/* all fields little endian */
+struct hdmi_avi_infoframe {
+	/* PB0 */
+	u8 csum;
+
+	/* PB1 */
+	unsigned s:2; /* scan information */
+	unsigned b:2; /* bar info data valid */
+	unsigned a:1; /* active info present */
+	unsigned y:2; /* RGB or YCbCr */
+	unsigned res1:1;
+
+	/* PB2 */
+	unsigned r:4; /* active format aspect ratio */
+	unsigned m:2; /* picture aspect ratio */
+	unsigned c:2; /* colorimetry */
+
+	/* PB3 */
+	unsigned sc:2;  /* non-uniform picture scaling */
+	unsigned q:2;   /* quantization range */
+	unsigned ec:3;  /* extended colorimetry */
+	unsigned itc:1; /* IT content */
+
+	/* PB4 */
+	unsigned vic:7; /* video format id code */
+	unsigned res4:1;
+
+	/* PB5 */
+	unsigned pr:4; /* pixel repetition factor */
+	unsigned cn:2; /* IT content type */
+	unsigned yq:2; /* ycc quantization range */
+
+	/* PB6-7 */
+	u16 top_bar_end_line;
+
+	/* PB8-9 */
+	u16 bot_bar_start_line;
+
+	/* PB10-11 */
+	u16 left_bar_end_pixel;
+
+	/* PB12-13 */
+	u16 right_bar_start_pixel;
+} __packed;
+
+#define HDMI_AVI_VERSION 0x02
+
+#define HDMI_AVI_Y_RGB       0x0
+#define HDMI_AVI_Y_YCBCR_422 0x1
+#define HDMI_AVI_Y_YCBCR_444 0x2
+
+#define HDMI_AVI_B_VERT  0x1
+#define HDMI_AVI_B_HORIZ 0x2
+
+#define HDMI_AVI_S_NONE      0x0
+#define HDMI_AVI_S_OVERSCAN  0x1
+#define HDMI_AVI_S_UNDERSCAN 0x2
+
+#define HDMI_AVI_C_NONE     0x0
+#define HDMI_AVI_C_SMPTE    0x1
+#define HDMI_AVI_C_ITU_R    0x2
+#define HDMI_AVI_C_EXTENDED 0x3
+
+#define HDMI_AVI_M_4_3  0x1
+#define HDMI_AVI_M_16_9 0x2
+
+#define HDMI_AVI_R_SAME        0x8
+#define HDMI_AVI_R_4_3_CENTER  0x9
+#define HDMI_AVI_R_16_9_CENTER 0xa
+#define HDMI_AVI_R_14_9_CENTER 0xb
+
+/* all fields little endian */
+struct hdmi_audio_infoframe {
+	/* PB0 */
+	u8 csum;
+
+	/* PB1 */
+	unsigned cc:3; /* channel count */
+	unsigned res1:1;
+	unsigned ct:4; /* coding type */
+
+	/* PB2 */
+	unsigned ss:2; /* sample size */
+	unsigned sf:3; /* sample frequency */
+	unsigned res2:3;
+
+	/* PB3 */
+	unsigned cxt:5; /* coding extension type */
+	unsigned res3:3;
+
+	/* PB4 */
+	u8 ca; /* channel/speaker allocation */
+
+	/* PB5 */
+	unsigned res5:3;
+	unsigned lsv:4; /* level shift value */
+	unsigned dm_inh:1; /* downmix inhibit */
+
+	/* PB6-10 reserved */
+	u8 res6;
+	u8 res7;
+	u8 res8;
+	u8 res9;
+	u8 res10;
+} __packed;
+
+#define HDMI_AUDIO_VERSION 0x01
+
+#define HDMI_AUDIO_CC_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_CC_2      0x1
+#define HDMI_AUDIO_CC_3      0x2
+#define HDMI_AUDIO_CC_4      0x3
+#define HDMI_AUDIO_CC_5      0x4
+#define HDMI_AUDIO_CC_6      0x5
+#define HDMI_AUDIO_CC_7      0x6
+#define HDMI_AUDIO_CC_8      0x7
+
+#define HDMI_AUDIO_CT_STREAM  0x0 /* specified by audio stream */
+#define HDMI_AUDIO_CT_PCM     0x1
+#define HDMI_AUDIO_CT_AC3     0x2
+#define HDMI_AUDIO_CT_MPEG1   0x3
+#define HDMI_AUDIO_CT_MP3     0x4
+#define HDMI_AUDIO_CT_MPEG2   0x5
+#define HDMI_AUDIO_CT_AAC_LC  0x6
+#define HDMI_AUDIO_CT_DTS     0x7
+#define HDMI_AUDIO_CT_ATRAC   0x8
+#define HDMI_AUDIO_CT_DSD     0x9
+#define HDMI_AUDIO_CT_E_AC3   0xa
+#define HDMI_AUDIO_CT_DTS_HD  0xb
+#define HDMI_AUDIO_CT_MLP     0xc
+#define HDMI_AUDIO_CT_DST     0xd
+#define HDMI_AUDIO_CT_WMA_PRO 0xe
+#define HDMI_AUDIO_CT_CXT     0xf
+
+#define HDMI_AUDIO_SF_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_SF_32K    0x1
+#define HDMI_AUDIO_SF_44_1K  0x2
+#define HDMI_AUDIO_SF_48K    0x3
+#define HDMI_AUDIO_SF_88_2K  0x4
+#define HDMI_AUDIO_SF_96K    0x5
+#define HDMI_AUDIO_SF_176_4K 0x6
+#define HDMI_AUDIO_SF_192K   0x7
+
+#define HDMI_AUDIO_SS_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_SS_16BIT  0x1
+#define HDMI_AUDIO_SS_20BIT  0x2
+#define HDMI_AUDIO_SS_24BIT  0x3
+
+#define HDMI_AUDIO_CXT_CT            0x0 /* refer to coding in CT */
+#define HDMI_AUDIO_CXT_HE_AAC        0x1
+#define HDMI_AUDIO_CXT_HE_AAC_V2     0x2
+#define HDMI_AUDIO_CXT_MPEG_SURROUND 0x3
+
+/* all fields little endian */
+struct hdmi_stereo_infoframe {
+	/* PB0 */
+	u8 csum;
+
+	/* PB1 */
+	u8 regid0;
+
+	/* PB2 */
+	u8 regid1;
+
+	/* PB3 */
+	u8 regid2;
+
+	/* PB4 */
+	unsigned res1:5;
+	unsigned hdmi_video_format:3;
+
+	/* PB5 */
+	unsigned res2:4;
+	unsigned _3d_structure:4;
+
+	/* PB6 */
+	unsigned res3:4;
+	unsigned _3d_ext_data:4;
+} __packed;
+
+#define HDMI_VENDOR_VERSION 0x01
+
+/* register definitions */
+#define HDMI_CTXSW						0x00
+
+#define HDMI_NV_PDISP_SOR_STATE0				0x01
+#define SOR_STATE_UPDATE (1 << 0)
+
+#define HDMI_NV_PDISP_SOR_STATE1				0x02
+#define SOR_STATE_ASY_HEAD_OPMODE_AWAKE (2 << 0)
+#define SOR_STATE_ASY_ORMODE_NORMAL     (1 << 2)
+#define SOR_STATE_ATTACHED              (1 << 3)
+
+#define HDMI_NV_PDISP_SOR_STATE2				0x03
+#define SOR_STATE_ASY_OWNER_NONE         (0 <<  0)
+#define SOR_STATE_ASY_OWNER_HEAD0        (1 <<  0)
+#define SOR_STATE_ASY_SUBOWNER_NONE      (0 <<  4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD0  (1 <<  4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD1  (2 <<  4)
+#define SOR_STATE_ASY_SUBOWNER_BOTH      (3 <<  4)
+#define SOR_STATE_ASY_CRCMODE_ACTIVE     (0 <<  6)
+#define SOR_STATE_ASY_CRCMODE_COMPLETE   (1 <<  6)
+#define SOR_STATE_ASY_CRCMODE_NON_ACTIVE (2 <<  6)
+#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (1 << 8)
+#define SOR_STATE_ASY_PROTOCOL_CUSTOM        (15 << 8)
+#define SOR_STATE_ASY_HSYNCPOL_POS       (0 << 12)
+#define SOR_STATE_ASY_HSYNCPOL_NEG       (1 << 12)
+#define SOR_STATE_ASY_VSYNCPOL_POS       (0 << 13)
+#define SOR_STATE_ASY_VSYNCPOL_NEG       (1 << 13)
+#define SOR_STATE_ASY_DEPOL_POS          (0 << 14)
+#define SOR_STATE_ASY_DEPOL_NEG          (1 << 14)
+
+#define HDMI_NV_PDISP_RG_HDCP_AN_MSB				0x04
+#define HDMI_NV_PDISP_RG_HDCP_AN_LSB				0x05
+#define HDMI_NV_PDISP_RG_HDCP_CN_MSB				0x06
+#define HDMI_NV_PDISP_RG_HDCP_CN_LSB				0x07
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_MSB				0x08
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_LSB				0x09
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_MSB				0x0a
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_LSB				0x0b
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_MSB				0x0c
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_LSB				0x0d
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_MSB				0x0e
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_LSB				0x0f
+#define HDMI_NV_PDISP_RG_HDCP_CTRL				0x10
+#define HDMI_NV_PDISP_RG_HDCP_CMODE				0x11
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB			0x12
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB			0x13
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB			0x14
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2			0x15
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1			0x16
+#define HDMI_NV_PDISP_RG_HDCP_RI				0x17
+#define HDMI_NV_PDISP_RG_HDCP_CS_MSB				0x18
+#define HDMI_NV_PDISP_RG_HDCP_CS_LSB				0x19
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU0				0x1a
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0			0x1b
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU1				0x1c
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU2				0x1d
+
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL			0x1e
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS		0x1f
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER		0x20
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW		0x21
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH	0x22
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL			0x23
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS			0x24
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER			0x25
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW		0x26
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH		0x27
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW		0x28
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH		0x29
+
+#define INFOFRAME_CTRL_ENABLE (1 << 0)
+
+#define INFOFRAME_HEADER_TYPE(x)    (((x) & 0xff) <<  0)
+#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) <<  8)
+#define INFOFRAME_HEADER_LEN(x)     (((x) & 0x0f) << 16)
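+/*
+ * Illustrative example: the header word for a CEA-861 AVI infoframe would
+ * be INFOFRAME_HEADER_TYPE(0x82) | INFOFRAME_HEADER_VERSION(0x02) |
+ * INFOFRAME_HEADER_LEN(13).
+ */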
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_CTRL				0x2a
+#define GENERIC_CTRL_ENABLE (1 <<  0)
+#define GENERIC_CTRL_OTHER  (1 <<  4)
+#define GENERIC_CTRL_SINGLE (1 <<  8)
+#define GENERIC_CTRL_HBLANK (1 << 12)
+#define GENERIC_CTRL_AUDIO  (1 << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_STATUS			0x2b
+#define HDMI_NV_PDISP_HDMI_GENERIC_HEADER			0x2c
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW			0x2d
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH		0x2e
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW			0x2f
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH		0x30
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW			0x31
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH		0x32
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW			0x33
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH		0x34
+
+#define HDMI_NV_PDISP_HDMI_ACR_CTRL				0x35
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW			0x36
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH		0x37
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW			0x38
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH		0x39
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW			0x3a
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH		0x3b
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW			0x3c
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH		0x3d
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW			0x3e
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH		0x3f
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW			0x40
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH		0x41
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW			0x42
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH		0x43
+
+#define ACR_SUBPACK_CTS(x) (((x) & 0xffffff) << 8)
+#define ACR_SUBPACK_N(x)   (((x) & 0xffffff) << 0)
+#define ACR_ENABLE         (1 << 31)
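+/*
+ * Each subpack pair above carries one (N, CTS) tuple for audio clock
+ * regeneration; the sink recovers the audio clock according to
+ * 128 * fs = f_TMDS * N / CTS.
+ */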
+
+#define HDMI_NV_PDISP_HDMI_CTRL					0x44
+#define HDMI_CTRL_REKEY(x)         (((x) & 0x7f) <<  0)
+#define HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
+#define HDMI_CTRL_ENABLE           (1 << 30)
+
+#define HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT			0x45
+#define HDMI_NV_PDISP_HDMI_VSYNC_WINDOW				0x46
+#define VSYNC_WINDOW_END(x)   (((x) & 0x3ff) <<  0)
+#define VSYNC_WINDOW_START(x) (((x) & 0x3ff) << 16)
+#define VSYNC_WINDOW_ENABLE   (1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_GCP_CTRL				0x47
+#define HDMI_NV_PDISP_HDMI_GCP_STATUS				0x48
+#define HDMI_NV_PDISP_HDMI_GCP_SUBPACK				0x49
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1			0x4a
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2			0x4b
+#define HDMI_NV_PDISP_HDMI_EMU0					0x4c
+#define HDMI_NV_PDISP_HDMI_EMU1					0x4d
+#define HDMI_NV_PDISP_HDMI_EMU1_RDATA				0x4e
+
+#define HDMI_NV_PDISP_HDMI_SPARE				0x4f
+#define SPARE_HW_CTS           (1 << 0)
+#define SPARE_FORCE_SW_CTS     (1 << 1)
+#define SPARE_CTS_RESET_VAL(x) (((x) & 0x7) << 16)
+
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1			0x50
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2			0x51
+#define HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL			0x53
+#define HDMI_NV_PDISP_SOR_CAP					0x54
+#define HDMI_NV_PDISP_SOR_PWR					0x55
+#define SOR_PWR_NORMAL_STATE_PD     (0 <<  0)
+#define SOR_PWR_NORMAL_STATE_PU     (1 <<  0)
+#define SOR_PWR_NORMAL_START_NORMAL (0 <<  1)
+#define SOR_PWR_NORMAL_START_ALT    (1 <<  1)
+#define SOR_PWR_SAFE_STATE_PD       (0 << 16)
+#define SOR_PWR_SAFE_STATE_PU       (1 << 16)
+#define SOR_PWR_SETTING_NEW_DONE    (0 << 31)
+#define SOR_PWR_SETTING_NEW_PENDING (1 << 31)
+#define SOR_PWR_SETTING_NEW_TRIGGER (1 << 31)
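+/*
+ * PENDING and TRIGGER share bit 31: writing a 1 presumably triggers the
+ * new power setting, and the bit reads back as 1 until the hardware has
+ * latched it (DONE).
+ */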
+
+#define HDMI_NV_PDISP_SOR_TEST					0x56
+#define HDMI_NV_PDISP_SOR_PLL0					0x57
+#define SOR_PLL_PWR            (1 << 0)
+#define SOR_PLL_PDBG           (1 << 1)
+#define SOR_PLL_VCAPD          (1 << 2)
+#define SOR_PLL_PDPORT         (1 << 3)
+#define SOR_PLL_RESISTORSEL    (1 << 4)
+#define SOR_PLL_PULLDOWN       (1 << 5)
+#define SOR_PLL_VCOCAP(x)      (((x) & 0xf) <<  8)
+#define SOR_PLL_BG_V17_S(x)    (((x) & 0xf) << 12)
+#define SOR_PLL_FILTER(x)      (((x) & 0xf) << 16)
+#define SOR_PLL_ICHPMP(x)      (((x) & 0xf) << 24)
+#define SOR_PLL_TX_REG_LOAD(x) (((x) & 0xf) << 28)
+
+#define HDMI_NV_PDISP_SOR_PLL1					0x58
+#define SOR_PLL_TMDS_TERM_ENABLE (1 << 8)
+#define SOR_PLL_TMDS_TERMADJ(x)  (((x) & 0xf) <<  9)
+#define SOR_PLL_LOADADJ(x)       (((x) & 0xf) << 20)
+#define SOR_PLL_PE_EN            (1 << 28)
+#define SOR_PLL_HALF_FULL_PE     (1 << 29)
+#define SOR_PLL_S_D_PIN_PE       (1 << 30)
+
+#define HDMI_NV_PDISP_SOR_PLL2					0x59
+
+#define HDMI_NV_PDISP_SOR_CSTM					0x5a
+#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24)
+
+#define HDMI_NV_PDISP_SOR_LVDS					0x5b
+#define HDMI_NV_PDISP_SOR_CRCA					0x5c
+#define HDMI_NV_PDISP_SOR_CRCB					0x5d
+#define HDMI_NV_PDISP_SOR_BLANK					0x5e
+#define HDMI_NV_PDISP_SOR_SEQ_CTL				0x5f
+#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) <<  0)
+#define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) <<  4)
+#define SOR_SEQ_PD_PC(x)     (((x) & 0xf) <<  8)
+#define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12)
+#define SOR_SEQ_PC(x)        (((x) & 0xf) << 16)
+#define SOR_SEQ_STATUS       (1 << 28)
+#define SOR_SEQ_SWITCH       (1 << 30)
+
+#define HDMI_NV_PDISP_SOR_SEQ_INST(x)				(0x60 + (x))
+
+#define SOR_SEQ_INST_WAIT_TIME(x)     (((x) & 0x3ff) << 0)
+#define SOR_SEQ_INST_WAIT_UNITS_VSYNC (2 << 12)
+#define SOR_SEQ_INST_HALT             (1 << 15)
+#define SOR_SEQ_INST_PIN_A_LOW        (0 << 21)
+#define SOR_SEQ_INST_PIN_A_HIGH       (1 << 21)
+#define SOR_SEQ_INST_PIN_B_LOW        (0 << 22)
+#define SOR_SEQ_INST_PIN_B_HIGH       (1 << 22)
+#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23)
+
+#define HDMI_NV_PDISP_SOR_VCRCA0				0x72
+#define HDMI_NV_PDISP_SOR_VCRCA1				0x73
+#define HDMI_NV_PDISP_SOR_CCRCA0				0x74
+#define HDMI_NV_PDISP_SOR_CCRCA1				0x75
+#define HDMI_NV_PDISP_SOR_EDATAA0				0x76
+#define HDMI_NV_PDISP_SOR_EDATAA1				0x77
+#define HDMI_NV_PDISP_SOR_COUNTA0				0x78
+#define HDMI_NV_PDISP_SOR_COUNTA1				0x79
+#define HDMI_NV_PDISP_SOR_DEBUGA0				0x7a
+#define HDMI_NV_PDISP_SOR_DEBUGA1				0x7b
+#define HDMI_NV_PDISP_SOR_TRIG					0x7c
+#define HDMI_NV_PDISP_SOR_MSCHECK				0x7d
+
+#define HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT			0x7e
+#define DRIVE_CURRENT_LANE0(x)      (((x) & 0x3f) <<  0)
+#define DRIVE_CURRENT_LANE1(x)      (((x) & 0x3f) <<  8)
+#define DRIVE_CURRENT_LANE2(x)      (((x) & 0x3f) << 16)
+#define DRIVE_CURRENT_LANE3(x)      (((x) & 0x3f) << 24)
+#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31)
+
+#define DRIVE_CURRENT_1_500_mA  0x00
+#define DRIVE_CURRENT_1_875_mA  0x01
+#define DRIVE_CURRENT_2_250_mA  0x02
+#define DRIVE_CURRENT_2_625_mA  0x03
+#define DRIVE_CURRENT_3_000_mA  0x04
+#define DRIVE_CURRENT_3_375_mA  0x05
+#define DRIVE_CURRENT_3_750_mA  0x06
+#define DRIVE_CURRENT_4_125_mA  0x07
+#define DRIVE_CURRENT_4_500_mA  0x08
+#define DRIVE_CURRENT_4_875_mA  0x09
+#define DRIVE_CURRENT_5_250_mA  0x0a
+#define DRIVE_CURRENT_5_625_mA  0x0b
+#define DRIVE_CURRENT_6_000_mA  0x0c
+#define DRIVE_CURRENT_6_375_mA  0x0d
+#define DRIVE_CURRENT_6_750_mA  0x0e
+#define DRIVE_CURRENT_7_125_mA  0x0f
+#define DRIVE_CURRENT_7_500_mA  0x10
+#define DRIVE_CURRENT_7_875_mA  0x11
+#define DRIVE_CURRENT_8_250_mA  0x12
+#define DRIVE_CURRENT_8_625_mA  0x13
+#define DRIVE_CURRENT_9_000_mA  0x14
+#define DRIVE_CURRENT_9_375_mA  0x15
+#define DRIVE_CURRENT_9_750_mA  0x16
+#define DRIVE_CURRENT_10_125_mA 0x17
+#define DRIVE_CURRENT_10_500_mA 0x18
+#define DRIVE_CURRENT_10_875_mA 0x19
+#define DRIVE_CURRENT_11_250_mA 0x1a
+#define DRIVE_CURRENT_11_625_mA 0x1b
+#define DRIVE_CURRENT_12_000_mA 0x1c
+#define DRIVE_CURRENT_12_375_mA 0x1d
+#define DRIVE_CURRENT_12_750_mA 0x1e
+#define DRIVE_CURRENT_13_125_mA 0x1f
+#define DRIVE_CURRENT_13_500_mA 0x20
+#define DRIVE_CURRENT_13_875_mA 0x21
+#define DRIVE_CURRENT_14_250_mA 0x22
+#define DRIVE_CURRENT_14_625_mA 0x23
+#define DRIVE_CURRENT_15_000_mA 0x24
+#define DRIVE_CURRENT_15_375_mA 0x25
+#define DRIVE_CURRENT_15_750_mA 0x26
+#define DRIVE_CURRENT_16_125_mA 0x27
+#define DRIVE_CURRENT_16_500_mA 0x28
+#define DRIVE_CURRENT_16_875_mA 0x29
+#define DRIVE_CURRENT_17_250_mA 0x2a
+#define DRIVE_CURRENT_17_625_mA 0x2b
+#define DRIVE_CURRENT_18_000_mA 0x2c
+#define DRIVE_CURRENT_18_375_mA 0x2d
+#define DRIVE_CURRENT_18_750_mA 0x2e
+#define DRIVE_CURRENT_19_125_mA 0x2f
+#define DRIVE_CURRENT_19_500_mA 0x30
+#define DRIVE_CURRENT_19_875_mA 0x31
+#define DRIVE_CURRENT_20_250_mA 0x32
+#define DRIVE_CURRENT_20_625_mA 0x33
+#define DRIVE_CURRENT_21_000_mA 0x34
+#define DRIVE_CURRENT_21_375_mA 0x35
+#define DRIVE_CURRENT_21_750_mA 0x36
+#define DRIVE_CURRENT_22_125_mA 0x37
+#define DRIVE_CURRENT_22_500_mA 0x38
+#define DRIVE_CURRENT_22_875_mA 0x39
+#define DRIVE_CURRENT_23_250_mA 0x3a
+#define DRIVE_CURRENT_23_625_mA 0x3b
+#define DRIVE_CURRENT_24_000_mA 0x3c
+#define DRIVE_CURRENT_24_375_mA 0x3d
+#define DRIVE_CURRENT_24_750_mA 0x3e
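+/*
+ * The encoding above is linear, in 0.375 mA steps starting at 1.5 mA,
+ * i.e. value = (current in mA - 1.5) / 0.375.
+ */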
+
+#define HDMI_NV_PDISP_AUDIO_DEBUG0				0x7f
+#define HDMI_NV_PDISP_AUDIO_DEBUG1				0x80
+#define HDMI_NV_PDISP_AUDIO_DEBUG2				0x81
+
+#define HDMI_NV_PDISP_AUDIO_FS(x)				(0x82 + (x))
+#define AUDIO_FS_LOW(x)  (((x) & 0xfff) <<  0)
+#define AUDIO_FS_HIGH(x) (((x) & 0xfff) << 16)
+
+#define HDMI_NV_PDISP_AUDIO_PULSE_WIDTH				0x89
+#define HDMI_NV_PDISP_AUDIO_THRESHOLD				0x8a
+#define HDMI_NV_PDISP_AUDIO_CNTRL0				0x8b
+#define AUDIO_CNTRL0_ERROR_TOLERANCE(x)  (((x) & 0xff) << 0)
+#define AUDIO_CNTRL0_SOURCE_SELECT_AUTO  (0 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_HDAL  (2 << 20)
+#define AUDIO_CNTRL0_FRAMES_PER_BLOCK(x) (((x) & 0xff) << 24)
+
+#define HDMI_NV_PDISP_AUDIO_N					0x8c
+#define AUDIO_N_VALUE(x)           (((x) & 0xfffff) << 0)
+#define AUDIO_N_RESETF             (1 << 20)
+#define AUDIO_N_GENERATE_NORMAL    (0 << 24)
+#define AUDIO_N_GENERATE_ALTERNATE (1 << 24)
+
+#define HDMI_NV_PDISP_HDCPRIF_ROM_TIMING			0x94
+#define HDMI_NV_PDISP_SOR_REFCLK				0x95
+#define SOR_REFCLK_DIV_INT(x)  (((x) & 0xff) << 8)
+#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x03) << 6)
+
+#define HDMI_NV_PDISP_CRC_CONTROL				0x96
+#define HDMI_NV_PDISP_INPUT_CONTROL				0x97
+#define HDMI_SRC_DISPLAYA       (0 << 0)
+#define HDMI_SRC_DISPLAYB       (1 << 0)
+#define ARM_VIDEO_RANGE_FULL    (0 << 1)
+#define ARM_VIDEO_RANGE_LIMITED (1 << 1)
+
+#define HDMI_NV_PDISP_SCRATCH					0x98
+#define HDMI_NV_PDISP_PE_CURRENT				0x99
+#define PE_CURRENT0(x) (((x) & 0xf) << 0)
+#define PE_CURRENT1(x) (((x) & 0xf) << 8)
+#define PE_CURRENT2(x) (((x) & 0xf) << 16)
+#define PE_CURRENT3(x) (((x) & 0xf) << 24)
+
+#define PE_CURRENT_0_0_mA 0x0
+#define PE_CURRENT_0_5_mA 0x1
+#define PE_CURRENT_1_0_mA 0x2
+#define PE_CURRENT_1_5_mA 0x3
+#define PE_CURRENT_2_0_mA 0x4
+#define PE_CURRENT_2_5_mA 0x5
+#define PE_CURRENT_3_0_mA 0x6
+#define PE_CURRENT_3_5_mA 0x7
+#define PE_CURRENT_4_0_mA 0x8
+#define PE_CURRENT_4_5_mA 0x9
+#define PE_CURRENT_5_0_mA 0xa
+#define PE_CURRENT_5_5_mA 0xb
+#define PE_CURRENT_6_0_mA 0xc
+#define PE_CURRENT_6_5_mA 0xd
+#define PE_CURRENT_7_0_mA 0xe
+#define PE_CURRENT_7_5_mA 0xf
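+/*
+ * Pre-emphasis current is likewise encoded linearly, in 0.5 mA steps:
+ * value = current in mA / 0.5.
+ */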
+
+#define HDMI_NV_PDISP_KEY_CTRL					0x9a
+#define HDMI_NV_PDISP_KEY_DEBUG0				0x9b
+#define HDMI_NV_PDISP_KEY_DEBUG1				0x9c
+#define HDMI_NV_PDISP_KEY_DEBUG2				0x9d
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_0				0x9e
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_1				0x9f
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_2				0xa0
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_3				0xa1
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG				0xa2
+#define HDMI_NV_PDISP_KEY_SKEY_INDEX				0xa3
+
+#define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0				0xac
+#define AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29)
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR			0xbc
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE			0xbd
+
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320    0xbf
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441    0xc0
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882    0xc1
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764    0xc2
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480    0xc3
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960    0xc4
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920    0xc5
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
+
+#endif /* TEGRA_HDMI_H */
diff --git a/drivers/gpu/drm/tegra/host1x.c b/drivers/gpu/drm/tegra/host1x.c
new file mode 100644
index 000000000000..bdb97a564d82
--- /dev/null
+++ b/drivers/gpu/drm/tegra/host1x.c
@@ -0,0 +1,325 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "drm.h"
+
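+/*
+ * Clients named in the device tree start out on host1x->drm_clients and
+ * are moved to host1x->drm_active once the corresponding driver has
+ * registered; the DRM device itself is only brought up when no clients
+ * remain pending.
+ */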
+struct host1x_drm_client {
+	struct host1x_client *client;
+	struct device_node *np;
+	struct list_head list;
+};
+
+static int host1x_add_drm_client(struct host1x *host1x, struct device_node *np)
+{
+	struct host1x_drm_client *client;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&client->list);
+	client->np = of_node_get(np);
+
+	list_add_tail(&client->list, &host1x->drm_clients);
+
+	return 0;
+}
+
+static int host1x_activate_drm_client(struct host1x *host1x,
+				      struct host1x_drm_client *drm,
+				      struct host1x_client *client)
+{
+	mutex_lock(&host1x->drm_clients_lock);
+	list_del_init(&drm->list);
+	list_add_tail(&drm->list, &host1x->drm_active);
+	drm->client = client;
+	mutex_unlock(&host1x->drm_clients_lock);
+
+	return 0;
+}
+
+static int host1x_remove_drm_client(struct host1x *host1x,
+				    struct host1x_drm_client *client)
+{
+	mutex_lock(&host1x->drm_clients_lock);
+	list_del_init(&client->list);
+	mutex_unlock(&host1x->drm_clients_lock);
+
+	of_node_put(client->np);
+	kfree(client);
+
+	return 0;
+}
+
+static int host1x_parse_dt(struct host1x *host1x)
+{
+	static const char * const compat[] = {
+		"nvidia,tegra20-dc",
+		"nvidia,tegra20-hdmi",
+		"nvidia,tegra30-dc",
+		"nvidia,tegra30-hdmi",
+	};
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < ARRAY_SIZE(compat); i++) {
+		struct device_node *np;
+
+		for_each_child_of_node(host1x->dev->of_node, np) {
+			if (of_device_is_compatible(np, compat[i]) &&
+			    of_device_is_available(np)) {
+				err = host1x_add_drm_client(host1x, np);
+				if (err < 0)
+					return err;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int tegra_host1x_probe(struct platform_device *pdev)
+{
+	struct host1x *host1x;
+	struct resource *regs;
+	int err;
+
+	host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
+	if (!host1x)
+		return -ENOMEM;
+
+	mutex_init(&host1x->drm_clients_lock);
+	INIT_LIST_HEAD(&host1x->drm_clients);
+	INIT_LIST_HEAD(&host1x->drm_active);
+	mutex_init(&host1x->clients_lock);
+	INIT_LIST_HEAD(&host1x->clients);
+	host1x->dev = &pdev->dev;
+
+	err = host1x_parse_dt(host1x);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
+		return err;
+	}
+
+	host1x->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(host1x->clk))
+		return PTR_ERR(host1x->clk);
+
+	err = clk_prepare_enable(host1x->clk);
+	if (err < 0)
+		return err;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs) {
+		err = -ENXIO;
+		goto err;
+	}
+
+	err = platform_get_irq(pdev, 0);
+	if (err < 0)
+		goto err;
+
+	host1x->syncpt = err;
+
+	err = platform_get_irq(pdev, 1);
+	if (err < 0)
+		goto err;
+
+	host1x->irq = err;
+
+	host1x->regs = devm_request_and_ioremap(&pdev->dev, regs);
+	if (!host1x->regs) {
+		err = -EADDRNOTAVAIL;
+		goto err;
+	}
+
+	platform_set_drvdata(pdev, host1x);
+
+	return 0;
+
+err:
+	clk_disable_unprepare(host1x->clk);
+	return err;
+}
+
+static int tegra_host1x_remove(struct platform_device *pdev)
+{
+	struct host1x *host1x = platform_get_drvdata(pdev);
+
+	clk_disable_unprepare(host1x->clk);
+
+	return 0;
+}
+
+int host1x_drm_init(struct host1x *host1x, struct drm_device *drm)
+{
+	struct host1x_client *client;
+
+	mutex_lock(&host1x->clients_lock);
+
+	list_for_each_entry(client, &host1x->clients, list) {
+		if (client->ops && client->ops->drm_init) {
+			int err = client->ops->drm_init(client, drm);
+			if (err < 0) {
+				dev_err(host1x->dev,
+					"DRM setup failed for %s: %d\n",
+					dev_name(client->dev), err);
+				mutex_unlock(&host1x->clients_lock);
+				return err;
+			}
+		}
+	}
+
+	mutex_unlock(&host1x->clients_lock);
+
+	return 0;
+}
+
+int host1x_drm_exit(struct host1x *host1x)
+{
+	struct platform_device *pdev = to_platform_device(host1x->dev);
+	struct host1x_client *client;
+
+	if (!host1x->drm)
+		return 0;
+
+	mutex_lock(&host1x->clients_lock);
+
+	list_for_each_entry_reverse(client, &host1x->clients, list) {
+		if (client->ops && client->ops->drm_exit) {
+			int err = client->ops->drm_exit(client);
+			if (err < 0) {
+				dev_err(host1x->dev,
+					"DRM cleanup failed for %s: %d\n",
+					dev_name(client->dev), err);
+				mutex_unlock(&host1x->clients_lock);
+				return err;
+			}
+		}
+	}
+
+	mutex_unlock(&host1x->clients_lock);
+
+	drm_platform_exit(&tegra_drm_driver, pdev);
+	host1x->drm = NULL;
+
+	return 0;
+}
+
+int host1x_register_client(struct host1x *host1x, struct host1x_client *client)
+{
+	struct host1x_drm_client *drm, *tmp;
+	int err;
+
+	mutex_lock(&host1x->clients_lock);
+	list_add_tail(&client->list, &host1x->clients);
+	mutex_unlock(&host1x->clients_lock);
+
+	list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
+		if (drm->np == client->dev->of_node)
+			host1x_activate_drm_client(host1x, drm, client);
+
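+	/* when no DT-declared clients remain pending, bring up the DRM device */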
+	if (list_empty(&host1x->drm_clients)) {
+		struct platform_device *pdev = to_platform_device(host1x->dev);
+
+		err = drm_platform_init(&tegra_drm_driver, pdev);
+		if (err < 0) {
+			dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+int host1x_unregister_client(struct host1x *host1x,
+			     struct host1x_client *client)
+{
+	struct host1x_drm_client *drm, *tmp;
+	int err;
+
+	list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
+		if (drm->client == client) {
+			err = host1x_drm_exit(host1x);
+			if (err < 0) {
+				dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
+					err);
+				return err;
+			}
+
+			host1x_remove_drm_client(host1x, drm);
+			break;
+		}
+	}
+
+	mutex_lock(&host1x->clients_lock);
+	list_del_init(&client->list);
+	mutex_unlock(&host1x->clients_lock);
+
+	return 0;
+}
+
+static struct of_device_id tegra_host1x_of_match[] = {
+	{ .compatible = "nvidia,tegra30-host1x", },
+	{ .compatible = "nvidia,tegra20-host1x", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tegra_host1x_of_match);
+
+struct platform_driver tegra_host1x_driver = {
+	.driver = {
+		.name = "tegra-host1x",
+		.owner = THIS_MODULE,
+		.of_match_table = tegra_host1x_of_match,
+	},
+	.probe = tegra_host1x_probe,
+	.remove = tegra_host1x_remove,
+};
+
+static int __init tegra_host1x_init(void)
+{
+	int err;
+
+	err = platform_driver_register(&tegra_host1x_driver);
+	if (err < 0)
+		return err;
+
+	err = platform_driver_register(&tegra_dc_driver);
+	if (err < 0)
+		goto unregister_host1x;
+
+	err = platform_driver_register(&tegra_hdmi_driver);
+	if (err < 0)
+		goto unregister_dc;
+
+	return 0;
+
+unregister_dc:
+	platform_driver_unregister(&tegra_dc_driver);
+unregister_host1x:
+	platform_driver_unregister(&tegra_host1x_driver);
+	return err;
+}
+module_init(tegra_host1x_init);
+
+static void __exit tegra_host1x_exit(void)
+{
+	platform_driver_unregister(&tegra_hdmi_driver);
+	platform_driver_unregister(&tegra_dc_driver);
+	platform_driver_unregister(&tegra_host1x_driver);
+}
+module_exit(tegra_host1x_exit);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
new file mode 100644
index 000000000000..8140fc6c34d8
--- /dev/null
+++ b/drivers/gpu/drm/tegra/output.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_i2c.h>
+
+#include "drm.h"
+
+static int tegra_connector_get_modes(struct drm_connector *connector)
+{
+	struct tegra_output *output = connector_to_output(connector);
+	struct edid *edid = NULL;
+	int err = 0;
+
+	if (output->edid)
+		edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL);
+	else if (output->ddc)
+		edid = drm_get_edid(connector, output->ddc);
+
+	drm_mode_connector_update_edid_property(connector, edid);
+
+	if (edid) {
+		err = drm_add_edid_modes(connector, edid);
+		kfree(edid);
+	}
+
+	return err;
+}
+
+static int tegra_connector_mode_valid(struct drm_connector *connector,
+				      struct drm_display_mode *mode)
+{
+	struct tegra_output *output = connector_to_output(connector);
+	enum drm_mode_status status = MODE_OK;
+	int err;
+
+	err = tegra_output_check_mode(output, mode, &status);
+	if (err < 0)
+		return MODE_ERROR;
+
+	return status;
+}
+
+static struct drm_encoder *
+tegra_connector_best_encoder(struct drm_connector *connector)
+{
+	struct tegra_output *output = connector_to_output(connector);
+
+	return &output->encoder;
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+	.get_modes = tegra_connector_get_modes,
+	.mode_valid = tegra_connector_mode_valid,
+	.best_encoder = tegra_connector_best_encoder,
+};
+
+static enum drm_connector_status
+tegra_connector_detect(struct drm_connector *connector, bool force)
+{
+	struct tegra_output *output = connector_to_output(connector);
+	enum drm_connector_status status = connector_status_unknown;
+
+	if (gpio_is_valid(output->hpd_gpio)) {
+		if (gpio_get_value(output->hpd_gpio) == 0)
+			status = connector_status_disconnected;
+		else
+			status = connector_status_connected;
+	} else {
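+		/* panels without a hotplug GPIO (LVDS) are assumed connected */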
+		if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+			status = connector_status_connected;
+	}
+
+	return status;
+}
+
+static void tegra_connector_destroy(struct drm_connector *connector)
+{
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = tegra_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = tegra_connector_destroy,
+};
+
+static void tegra_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs encoder_funcs = {
+	.destroy = tegra_encoder_destroy,
+};
+
+static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder,
+				     const struct drm_display_mode *mode,
+				     struct drm_display_mode *adjusted)
+{
+	return true;
+}
+
+static void tegra_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_encoder_mode_set(struct drm_encoder *encoder,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted)
+{
+	struct tegra_output *output = encoder_to_output(encoder);
+	int err;
+
+	err = tegra_output_enable(output);
+	if (err < 0)
+		dev_err(encoder->dev->dev, "tegra_output_enable(): %d\n", err);
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+	.dpms = tegra_encoder_dpms,
+	.mode_fixup = tegra_encoder_mode_fixup,
+	.prepare = tegra_encoder_prepare,
+	.commit = tegra_encoder_commit,
+	.mode_set = tegra_encoder_mode_set,
+};
+
+static irqreturn_t hpd_irq(int irq, void *data)
+{
+	struct tegra_output *output = data;
+
+	drm_helper_hpd_irq_event(output->connector.dev);
+
+	return IRQ_HANDLED;
+}
+
+int tegra_output_parse_dt(struct tegra_output *output)
+{
+	enum of_gpio_flags flags;
+	struct device_node *ddc;
+	int size;
+	int err;
+
+	if (!output->of_node)
+		output->of_node = output->dev->of_node;
+
+	output->edid = of_get_property(output->of_node, "nvidia,edid", &size);
+
+	ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0);
+	if (ddc) {
+		output->ddc = of_find_i2c_adapter_by_node(ddc);
+		if (!output->ddc) {
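+			/* the I2C adapter may simply not have been probed yet */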
+			err = -EPROBE_DEFER;
+			of_node_put(ddc);
+			return err;
+		}
+
+		of_node_put(ddc);
+	}
+
+	if (!output->edid && !output->ddc)
+		return -ENODEV;
+
+	output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
+						   "nvidia,hpd-gpio", 0,
+						   &flags);
+
+	return 0;
+}
+
+int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
+{
+	int connector, encoder, err;
+
+	if (gpio_is_valid(output->hpd_gpio)) {
+		unsigned long flags;
+
+		err = gpio_request_one(output->hpd_gpio, GPIOF_DIR_IN,
+				       "HDMI hotplug detect");
+		if (err < 0) {
+			dev_err(output->dev, "gpio_request_one(): %d\n", err);
+			return err;
+		}
+
+		err = gpio_to_irq(output->hpd_gpio);
+		if (err < 0) {
+			dev_err(output->dev, "gpio_to_irq(): %d\n", err);
+			goto free_hpd;
+		}
+
+		output->hpd_irq = err;
+
+		flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+			IRQF_ONESHOT;
+
+		err = request_threaded_irq(output->hpd_irq, NULL, hpd_irq,
+					   flags, "hpd", output);
+		if (err < 0) {
+			dev_err(output->dev, "failed to request IRQ#%u: %d\n",
+				output->hpd_irq, err);
+			goto free_hpd;
+		}
+
+		output->connector.polled = DRM_CONNECTOR_POLL_HPD;
+	}
+
+	switch (output->type) {
+	case TEGRA_OUTPUT_RGB:
+		connector = DRM_MODE_CONNECTOR_LVDS;
+		encoder = DRM_MODE_ENCODER_LVDS;
+		break;
+
+	case TEGRA_OUTPUT_HDMI:
+		connector = DRM_MODE_CONNECTOR_HDMIA;
+		encoder = DRM_MODE_ENCODER_TMDS;
+		break;
+
+	default:
+		connector = DRM_MODE_CONNECTOR_Unknown;
+		encoder = DRM_MODE_ENCODER_NONE;
+		break;
+	}
+
+	drm_connector_init(drm, &output->connector, &connector_funcs,
+			   connector);
+	drm_connector_helper_add(&output->connector, &connector_helper_funcs);
+
+	drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
+	drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
+
+	drm_mode_connector_attach_encoder(&output->connector, &output->encoder);
+	drm_sysfs_connector_add(&output->connector);
+
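+	/* by default an output can be driven by either display controller */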
+	output->encoder.possible_crtcs = 0x3;
+
+	return 0;
+
+free_hpd:
+	gpio_free(output->hpd_gpio);
+
+	return err;
+}
+
+int tegra_output_exit(struct tegra_output *output)
+{
+	if (gpio_is_valid(output->hpd_gpio)) {
+		free_irq(output->hpd_irq, output);
+		gpio_free(output->hpd_gpio);
+	}
+
+	if (output->ddc)
+		put_device(&output->ddc->dev);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
new file mode 100644
index 000000000000..ed4416f20260
--- /dev/null
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "drm.h"
+#include "dc.h"
+
+struct tegra_rgb {
+	struct tegra_output output;
+	struct clk *clk_parent;
+	struct clk *clk;
+};
+
+static inline struct tegra_rgb *to_rgb(struct tegra_output *output)
+{
+	return container_of(output, struct tegra_rgb, output);
+}
+
+struct reg_entry {
+	unsigned long offset;
+	unsigned long value;
+};
+
+static const struct reg_entry rgb_enable[] = {
+	{ DC_COM_PIN_OUTPUT_ENABLE(0),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_ENABLE(1),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_ENABLE(2),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_ENABLE(3),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
+	{ DC_COM_PIN_OUTPUT_POLARITY(1), 0x01000000 },
+	{ DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
+	{ DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
+	{ DC_COM_PIN_OUTPUT_DATA(0),     0x00000000 },
+	{ DC_COM_PIN_OUTPUT_DATA(1),     0x00000000 },
+	{ DC_COM_PIN_OUTPUT_DATA(2),     0x00000000 },
+	{ DC_COM_PIN_OUTPUT_DATA(3),     0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(0),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(1),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(2),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(3),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(4),   0x00210222 },
+	{ DC_COM_PIN_OUTPUT_SELECT(5),   0x00002200 },
+	{ DC_COM_PIN_OUTPUT_SELECT(6),   0x00020000 },
+};
+
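+/*
+ * The disable sequence writes the same registers in reverse order; the
+ * 0xaaaaaaaa/0x55555555 patterns presumably park the pins in a safe,
+ * inactive state.
+ */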
+static const struct reg_entry rgb_disable[] = {
+	{ DC_COM_PIN_OUTPUT_SELECT(6),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(5),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(4),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(3),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(2),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(1),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(0),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_DATA(3),     0xaaaaaaaa },
+	{ DC_COM_PIN_OUTPUT_DATA(2),     0xaaaaaaaa },
+	{ DC_COM_PIN_OUTPUT_DATA(1),     0xaaaaaaaa },
+	{ DC_COM_PIN_OUTPUT_DATA(0),     0xaaaaaaaa },
+	{ DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
+	{ DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
+	{ DC_COM_PIN_OUTPUT_POLARITY(1), 0x00000000 },
+	{ DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
+	{ DC_COM_PIN_OUTPUT_ENABLE(3),   0x55555555 },
+	{ DC_COM_PIN_OUTPUT_ENABLE(2),   0x55555555 },
+	{ DC_COM_PIN_OUTPUT_ENABLE(1),   0x55150005 },
+	{ DC_COM_PIN_OUTPUT_ENABLE(0),   0x55555555 },
+};
+
+static void tegra_dc_write_regs(struct tegra_dc *dc,
+				const struct reg_entry *table,
+				unsigned int num)
+{
+	unsigned int i;
+
+	for (i = 0; i < num; i++)
+		tegra_dc_writel(dc, table[i].value, table[i].offset);
+}
+
+static int tegra_output_rgb_enable(struct tegra_output *output)
+{
+	struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+
+	tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable));
+
+	return 0;
+}
+
+static int tegra_output_rgb_disable(struct tegra_output *output)
+{
+	struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+
+	tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+
+	return 0;
+}
+
+static int tegra_output_rgb_setup_clock(struct tegra_output *output,
+					struct clk *clk, unsigned long pclk)
+{
+	struct tegra_rgb *rgb = to_rgb(output);
+
+	return clk_set_parent(clk, rgb->clk_parent);
+}
+
+static int tegra_output_rgb_check_mode(struct tegra_output *output,
+				       struct drm_display_mode *mode,
+				       enum drm_mode_status *status)
+{
+	/*
+	 * FIXME: For now, always assume that the mode is okay. There are
+	 * unresolved issues with clk_round_rate(), which doesn't always
+	 * reliably report whether a frequency can be set or not.
+	 */
+
+	*status = MODE_OK;
+
+	return 0;
+}
+
+static const struct tegra_output_ops rgb_ops = {
+	.enable = tegra_output_rgb_enable,
+	.disable = tegra_output_rgb_disable,
+	.setup_clock = tegra_output_rgb_setup_clock,
+	.check_mode = tegra_output_rgb_check_mode,
+};
+
+int tegra_dc_rgb_probe(struct tegra_dc *dc)
+{
+	struct device_node *np;
+	struct tegra_rgb *rgb;
+	int err;
+
+	np = of_get_child_by_name(dc->dev->of_node, "rgb");
+	if (!np || !of_device_is_available(np))
+		return -ENODEV;
+
+	rgb = devm_kzalloc(dc->dev, sizeof(*rgb), GFP_KERNEL);
+	if (!rgb)
+		return -ENOMEM;
+
+	rgb->clk = devm_clk_get(dc->dev, NULL);
+	if (IS_ERR(rgb->clk)) {
+		dev_err(dc->dev, "failed to get clock\n");
+		return PTR_ERR(rgb->clk);
+	}
+
+	rgb->clk_parent = devm_clk_get(dc->dev, "parent");
+	if (IS_ERR(rgb->clk_parent)) {
+		dev_err(dc->dev, "failed to get parent clock\n");
+		return PTR_ERR(rgb->clk_parent);
+	}
+
+	err = clk_set_parent(rgb->clk, rgb->clk_parent);
+	if (err < 0) {
+		dev_err(dc->dev, "failed to set parent clock: %d\n", err);
+		return err;
+	}
+
+	rgb->output.dev = dc->dev;
+	rgb->output.of_node = np;
+
+	err = tegra_output_parse_dt(&rgb->output);
+	if (err < 0)
+		return err;
+
+	dc->rgb = &rgb->output;
+
+	return 0;
+}
+
+int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
+{
+	struct tegra_rgb *rgb = to_rgb(dc->rgb);
+	int err;
+
+	if (!dc->rgb)
+		return -ENODEV;
+
+	rgb->output.type = TEGRA_OUTPUT_RGB;
+	rgb->output.ops = &rgb_ops;
+
+	err = tegra_output_init(dc->base.dev, &rgb->output);
+	if (err < 0) {
+		dev_err(dc->dev, "output setup failed: %d\n", err);
+		return err;
+	}
+
+	/*
+	 * By default, outputs can be associated with each display controller.
+	 * RGB outputs are an exception, so we make sure they can be attached
+	 * to only their parent display controller.
+	 */
+	rgb->output.encoder.possible_crtcs = 1 << dc->pipe;
+
+	return 0;
+}
+
+int tegra_dc_rgb_exit(struct tegra_dc *dc)
+{
+	if (dc->rgb) {
+		int err;
+
+		err = tegra_output_disable(dc->rgb);
+		if (err < 0) {
+			dev_err(dc->dev, "output failed to disable: %d\n", err);
+			return err;
+		}
+
+		err = tegra_output_exit(dc->rgb);
+		if (err < 0) {
+			dev_err(dc->dev, "output cleanup failed: %d\n", err);
+			return err;
+		}
+
+		dc->rgb = NULL;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index bf6e4b5a73b5..a9151337d5b9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -162,9 +162,9 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
 {
 	if (interruptible) {
 		return wait_event_interruptible(bo->event_queue,
-					       atomic_read(&bo->reserved) == 0);
+					       !ttm_bo_is_reserved(bo));
 	} else {
-		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
+		wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
 		return 0;
 	}
 }
@@ -175,7 +175,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man;
 
-	BUG_ON(!atomic_read(&bo->reserved));
+	BUG_ON(!ttm_bo_is_reserved(bo));
 
 	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
 
@@ -220,7 +220,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 	struct ttm_bo_global *glob = bo->glob;
 	int ret;
 
-	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
+	while (unlikely(atomic_read(&bo->reserved) != 0)) {
 		/**
 		 * Deadlock avoidance for multi-bo reserving.
 		 */
@@ -249,6 +249,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 			return ret;
 	}
 
+	atomic_set(&bo->reserved, 1);
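+	/* safe as a plain set: reservations are serialized by lru_lock here */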
 	if (use_sequence) {
 		/**
 		 * Wake up waiters that may need to recheck for deadlock,
@@ -365,7 +366,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *mem,
 				  bool evict, bool interruptible,
-				  bool no_wait_reserve, bool no_wait_gpu)
+				  bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -419,12 +420,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
+		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
 	else if (bdev->driver->move)
 		ret = bdev->driver->move(bo, evict, interruptible,
-					 no_wait_reserve, no_wait_gpu, mem);
+					 no_wait_gpu, mem);
 	else
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
 
 	if (ret) {
 		if (bdev->driver->move_notify) {
@@ -487,40 +488,33 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 	ttm_bo_mem_put(bo, &bo->mem);
 
 	atomic_set(&bo->reserved, 0);
+	wake_up_all(&bo->event_queue);
 
 	/*
-	 * Make processes trying to reserve really pick it up.
+	 * Since the final reference to this bo may not be dropped by
+	 * the current task we have to put a memory barrier here to make
+	 * sure the changes done in this function are always visible.
+	 *
+	 * This function only needs protection against the final kref_put.
 	 */
-	smp_mb__after_atomic_dec();
-	wake_up_all(&bo->event_queue);
+	smp_mb__before_atomic_dec();
 }
 
 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
-	struct ttm_bo_driver *driver;
+	struct ttm_bo_driver *driver = bdev->driver;
 	void *sync_obj = NULL;
-	void *sync_obj_arg;
 	int put_count;
 	int ret;
 
+	spin_lock(&glob->lru_lock);
+	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+
 	spin_lock(&bdev->fence_lock);
 	(void) ttm_bo_wait(bo, false, false, true);
-	if (!bo->sync_obj) {
-
-		spin_lock(&glob->lru_lock);
-
-		/**
-		 * Lock inversion between bo:reserve and bdev::fence_lock here,
-		 * but that's OK, since we're only trylocking.
-		 */
-
-		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
-		if (unlikely(ret == -EBUSY))
-			goto queue;
-
+	if (!ret && !bo->sync_obj) {
 		spin_unlock(&bdev->fence_lock);
 		put_count = ttm_bo_del_from_lru(bo);
 
@@ -530,22 +524,22 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		ttm_bo_list_ref_sub(bo, put_count, true);
 
 		return;
-	} else {
-		spin_lock(&glob->lru_lock);
 	}
-queue:
-	driver = bdev->driver;
 	if (bo->sync_obj)
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
-	sync_obj_arg = bo->sync_obj_arg;
+	spin_unlock(&bdev->fence_lock);
+
+	if (!ret) {
+		atomic_set(&bo->reserved, 0);
+		wake_up_all(&bo->event_queue);
+	}
 
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 	spin_unlock(&glob->lru_lock);
-	spin_unlock(&bdev->fence_lock);
 
 	if (sync_obj) {
-		driver->sync_obj_flush(sync_obj, sync_obj_arg);
+		driver->sync_obj_flush(sync_obj);
 		driver->sync_obj_unref(&sync_obj);
 	}
 	schedule_delayed_work(&bdev->wq,
@@ -553,68 +547,84 @@ queue:
 }
 
 /**
- * function ttm_bo_cleanup_refs
+ * function ttm_bo_cleanup_refs_and_unlock
  * If bo idle, remove from delayed- and lru lists, and unref.
  * If not idle, do nothing.
  *
+ * Must be called with lru_lock and reservation held, this function
+ * will drop both before returning.
+ *
  * @interruptible         Any sleeps should occur interruptibly.
- * @no_wait_reserve       Never wait for reserve. Return -EBUSY instead.
  * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
  */
 
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
-			       bool interruptible,
-			       bool no_wait_reserve,
-			       bool no_wait_gpu)
+static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
+					  bool interruptible,
+					  bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_driver *driver = bdev->driver;
 	struct ttm_bo_global *glob = bo->glob;
 	int put_count;
-	int ret = 0;
+	int ret;
 
-retry:
 	spin_lock(&bdev->fence_lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bdev->fence_lock);
+	ret = ttm_bo_wait(bo, false, false, true);
 
-	if (unlikely(ret != 0))
-		return ret;
+	if (ret && !no_wait_gpu) {
+		void *sync_obj;
 
-retry_reserve:
-	spin_lock(&glob->lru_lock);
+		/*
+		 * Take a reference to the fence and unreserve,
+		 * at this point the buffer should be dead, so
+		 * no new sync objects can be attached.
+		 */
+		sync_obj = driver->sync_obj_ref(bo->sync_obj);
+		spin_unlock(&bdev->fence_lock);
 
-	if (unlikely(list_empty(&bo->ddestroy))) {
-		spin_unlock(&glob->lru_lock);
-		return 0;
-	}
-
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
-	if (unlikely(ret == -EBUSY)) {
-		spin_unlock(&glob->lru_lock);
-		if (likely(!no_wait_reserve))
-			ret = ttm_bo_wait_unreserved(bo, interruptible);
-		if (unlikely(ret != 0))
-			return ret;
-
-		goto retry_reserve;
-	}
-
-	BUG_ON(ret != 0);
-
-	/**
-	 * We can re-check for sync object without taking
-	 * the bo::lock since setting the sync object requires
-	 * also bo::reserved. A busy object at this point may
-	 * be caused by another thread recently starting an accelerated
-	 * eviction.
-	 */
-
-	if (unlikely(bo->sync_obj)) {
 		atomic_set(&bo->reserved, 0);
 		wake_up_all(&bo->event_queue);
 		spin_unlock(&glob->lru_lock);
-		goto retry;
+
+		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
+		driver->sync_obj_unref(&sync_obj);
+		if (ret)
+			return ret;
+
+		/*
+		 * Remove the sync_obj with ttm_bo_wait; the wait should have
+		 * finished, and no new sync object can have been attached.
+		 */
+		spin_lock(&bdev->fence_lock);
+		ret = ttm_bo_wait(bo, false, false, true);
+		WARN_ON(ret);
+		spin_unlock(&bdev->fence_lock);
+		if (ret)
+			return ret;
+
+		spin_lock(&glob->lru_lock);
+		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+
+		/*
+		 * We raced and lost: someone else holds the reservation now
+		 * and is probably busy in ttm_bo_cleanup_memtype_use.
+		 *
+		 * Even if that's not the case, the wait above has finished, so
+		 * any delayed destruction would now succeed; just return
+		 * success here.
+		 */
+		if (ret) {
+			spin_unlock(&glob->lru_lock);
+			return 0;
+		}
+	} else
+		spin_unlock(&bdev->fence_lock);
+
+	if (ret || unlikely(list_empty(&bo->ddestroy))) {
+		atomic_set(&bo->reserved, 0);
+		wake_up_all(&bo->event_queue);
+		spin_unlock(&glob->lru_lock);
+		return ret;
 	}
 
 	put_count = ttm_bo_del_from_lru(bo);
@@ -657,9 +667,13 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 			kref_get(&nentry->list_kref);
 		}
 
-		spin_unlock(&glob->lru_lock);
-		ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
-					  !remove_all);
+		ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+		if (!ret)
+			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
+							     !remove_all);
+		else
+			spin_unlock(&glob->lru_lock);
+
 		kref_put(&entry->list_kref, ttm_bo_release_list);
 		entry = nentry;
 
@@ -697,6 +711,7 @@ static void ttm_bo_release(struct kref *kref)
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
+	write_lock(&bdev->vm_lock);
 	if (likely(bo->vm_node != NULL)) {
 		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
 		drm_mm_put_block(bo->vm_node);
@@ -708,18 +723,14 @@ static void ttm_bo_release(struct kref *kref)
 	ttm_mem_io_unlock(man);
 	ttm_bo_cleanup_refs_or_queue(bo);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
-	write_lock(&bdev->vm_lock);
 }
 
 void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 {
 	struct ttm_buffer_object *bo = *p_bo;
-	struct ttm_bo_device *bdev = bo->bdev;
 
 	*p_bo = NULL;
-	write_lock(&bdev->vm_lock);
 	kref_put(&bo->kref, ttm_bo_release);
-	write_unlock(&bdev->vm_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unref);
 
@@ -738,7 +749,7 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
 
 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
-			bool no_wait_reserve, bool no_wait_gpu)
+			bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_reg evict_mem;
@@ -756,7 +767,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 		goto out;
 	}
 
-	BUG_ON(!atomic_read(&bo->reserved));
+	BUG_ON(!ttm_bo_is_reserved(bo));
 
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
@@ -769,7 +780,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	placement.num_busy_placement = 0;
 	bdev->driver->evict_flags(bo, &placement);
 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
-				no_wait_reserve, no_wait_gpu);
+				no_wait_gpu);
 	if (ret) {
 		if (ret != -ERESTARTSYS) {
 			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
@@ -780,7 +791,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	}
 
 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
-				     no_wait_reserve, no_wait_gpu);
+				     no_wait_gpu);
 	if (ret) {
 		if (ret != -ERESTARTSYS)
 			pr_err("Buffer eviction failed\n");
@@ -794,49 +805,33 @@ out:
 
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 				uint32_t mem_type,
-				bool interruptible, bool no_wait_reserve,
+				bool interruptible,
 				bool no_wait_gpu)
 {
 	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_buffer_object *bo;
-	int ret, put_count = 0;
+	int ret = -EBUSY, put_count;
 
-retry:
 	spin_lock(&glob->lru_lock);
-	if (list_empty(&man->lru)) {
-		spin_unlock(&glob->lru_lock);
-		return -EBUSY;
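+	/* pick the first LRU entry that can be reserved without blocking */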
+	list_for_each_entry(bo, &man->lru, lru) {
+		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		if (!ret)
+			break;
 	}
 
-	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
-	kref_get(&bo->list_kref);
-
-	if (!list_empty(&bo->ddestroy)) {
+	if (ret) {
 		spin_unlock(&glob->lru_lock);
-		ret = ttm_bo_cleanup_refs(bo, interruptible,
-					  no_wait_reserve, no_wait_gpu);
-		kref_put(&bo->list_kref, ttm_bo_release_list);
-
 		return ret;
 	}
 
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
-	if (unlikely(ret == -EBUSY)) {
-		spin_unlock(&glob->lru_lock);
-		if (likely(!no_wait_reserve))
-			ret = ttm_bo_wait_unreserved(bo, interruptible);
+	kref_get(&bo->list_kref);
 
+	if (!list_empty(&bo->ddestroy)) {
+		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
+						     no_wait_gpu);
 		kref_put(&bo->list_kref, ttm_bo_release_list);
-
-		/**
-		 * We *need* to retry after releasing the lru lock.
-		 */
-
-		if (unlikely(ret != 0))
-			return ret;
-		goto retry;
+		return ret;
 	}
 
 	put_count = ttm_bo_del_from_lru(bo);
@@ -846,7 +841,7 @@ retry:
 
 	ttm_bo_list_ref_sub(bo, put_count, true);
 
-	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
+	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
 	ttm_bo_unreserve(bo);
 
 	kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -871,7 +866,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 					struct ttm_placement *placement,
 					struct ttm_mem_reg *mem,
 					bool interruptible,
-					bool no_wait_reserve,
 					bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -884,8 +878,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 			return ret;
 		if (mem->mm_node)
 			break;
-		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
-						no_wait_reserve, no_wait_gpu);
+		ret = ttm_mem_evict_first(bdev, mem_type,
+					  interruptible, no_wait_gpu);
 		if (unlikely(ret != 0))
 			return ret;
 	} while (1);
@@ -950,7 +944,7 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
 			struct ttm_mem_reg *mem,
-			bool interruptible, bool no_wait_reserve,
+			bool interruptible,
 			bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -1041,7 +1035,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		}
 
 		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
-						interruptible, no_wait_reserve, no_wait_gpu);
+						interruptible, no_wait_gpu);
 		if (ret == 0 && mem->mm_node) {
 			mem->placement = cur_flags;
 			return 0;
@@ -1054,26 +1048,16 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
 
-int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
-{
-	if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
-		return -EBUSY;
-
-	return wait_event_interruptible(bo->event_queue,
-					atomic_read(&bo->cpu_writers) == 0);
-}
-EXPORT_SYMBOL(ttm_bo_wait_cpu);
-
 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
-			bool interruptible, bool no_wait_reserve,
+			bool interruptible,
 			bool no_wait_gpu)
 {
 	int ret = 0;
 	struct ttm_mem_reg mem;
 	struct ttm_bo_device *bdev = bo->bdev;
 
-	BUG_ON(!atomic_read(&bo->reserved));
+	BUG_ON(!ttm_bo_is_reserved(bo));
 
 	/*
 	 * FIXME: It's possible to pipeline buffer moves.
@@ -1093,10 +1077,12 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	/*
 	 * Determine where to move the buffer.
 	 */
-	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
+	ret = ttm_bo_mem_space(bo, placement, &mem,
+			       interruptible, no_wait_gpu);
 	if (ret)
 		goto out_unlock;
-	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
+	ret = ttm_bo_handle_move_mem(bo, &mem, false,
+				     interruptible, no_wait_gpu);
 out_unlock:
 	if (ret && mem.mm_node)
 		ttm_bo_mem_put(bo, &mem);
@@ -1125,12 +1111,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
-			bool interruptible, bool no_wait_reserve,
+			bool interruptible,
 			bool no_wait_gpu)
 {
 	int ret;
 
-	BUG_ON(!atomic_read(&bo->reserved));
+	BUG_ON(!ttm_bo_is_reserved(bo));
 	/* Check that range is valid */
 	if (placement->lpfn || placement->fpfn)
 		if (placement->fpfn > placement->lpfn ||
@@ -1141,7 +1127,8 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	 */
 	ret = ttm_bo_mem_compat(placement, &bo->mem);
 	if (ret < 0) {
-		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
+		ret = ttm_bo_move_buffer(bo, placement, interruptible,
+					 no_wait_gpu);
 		if (ret)
 			return ret;
 	} else {
@@ -1179,7 +1166,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 		enum ttm_bo_type type,
 		struct ttm_placement *placement,
 		uint32_t page_alignment,
-		unsigned long buffer_start,
 		bool interruptible,
 		struct file *persistent_swap_storage,
 		size_t acc_size,
@@ -1200,7 +1186,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 		return -ENOMEM;
 	}
 
-	size += buffer_start & ~PAGE_MASK;
 	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (num_pages == 0) {
 		pr_err("Illegal buffer object size\n");
@@ -1233,7 +1218,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->mem.page_alignment = page_alignment;
 	bo->mem.bus.io_reserved_vm = false;
 	bo->mem.bus.io_reserved_count = 0;
-	bo->buffer_start = buffer_start & PAGE_MASK;
 	bo->priv_flags = 0;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
 	bo->seq_valid = false;
@@ -1257,7 +1241,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 			goto out_err;
 	}
 
-	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+	ret = ttm_bo_validate(bo, placement, interruptible, false);
 	if (ret)
 		goto out_err;
 
@@ -1306,7 +1290,6 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
 			enum ttm_bo_type type,
 			struct ttm_placement *placement,
 			uint32_t page_alignment,
-			unsigned long buffer_start,
 			bool interruptible,
 			struct file *persistent_swap_storage,
 			struct ttm_buffer_object **p_bo)
@@ -1321,8 +1304,8 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
 
 	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
 	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
-				buffer_start, interruptible,
-			  persistent_swap_storage, acc_size, NULL, NULL);
+			  interruptible, persistent_swap_storage, acc_size,
+			  NULL, NULL);
 	if (likely(ret == 0))
 		*p_bo = bo;
 
@@ -1344,7 +1327,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 	spin_lock(&glob->lru_lock);
 	while (!list_empty(&man->lru)) {
 		spin_unlock(&glob->lru_lock);
-		ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
+		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
 		if (ret) {
 			if (allow_errors) {
 				return ret;
@@ -1577,7 +1560,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 		goto out_no_addr_mm;
 
 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
-	bdev->nice_mode = true;
 	INIT_LIST_HEAD(&bdev->ddestroy);
 	bdev->dev_mapping = NULL;
 	bdev->glob = glob;
@@ -1721,7 +1703,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 	struct ttm_bo_driver *driver = bo->bdev->driver;
 	struct ttm_bo_device *bdev = bo->bdev;
 	void *sync_obj;
-	void *sync_obj_arg;
 	int ret = 0;
 
 	if (likely(bo->sync_obj == NULL))
@@ -1729,7 +1710,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 
 	while (bo->sync_obj) {
 
-		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
+		if (driver->sync_obj_signaled(bo->sync_obj)) {
 			void *tmp_obj = bo->sync_obj;
 			bo->sync_obj = NULL;
 			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
@@ -1743,9 +1724,8 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 			return -EBUSY;
 
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
-		sync_obj_arg = bo->sync_obj_arg;
 		spin_unlock(&bdev->fence_lock);
-		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
+		ret = driver->sync_obj_wait(sync_obj,
 					    lazy, interruptible);
 		if (unlikely(ret != 0)) {
 			driver->sync_obj_unref(&sync_obj);
@@ -1753,8 +1733,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 			return ret;
 		}
 		spin_lock(&bdev->fence_lock);
-		if (likely(bo->sync_obj == sync_obj &&
-			   bo->sync_obj_arg == sync_obj_arg)) {
+		if (likely(bo->sync_obj == sync_obj)) {
 			void *tmp_obj = bo->sync_obj;
 			bo->sync_obj = NULL;
 			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
@@ -1797,8 +1776,7 @@ EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
 
 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
 {
-	if (atomic_dec_and_test(&bo->cpu_writers))
-		wake_up_all(&bo->event_queue);
+	atomic_dec(&bo->cpu_writers);
 }
 EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
 
@@ -1817,40 +1795,25 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
 
 	spin_lock(&glob->lru_lock);
-	while (ret == -EBUSY) {
-		if (unlikely(list_empty(&glob->swap_lru))) {
-			spin_unlock(&glob->lru_lock);
-			return -EBUSY;
-		}
-
-		bo = list_first_entry(&glob->swap_lru,
-				      struct ttm_buffer_object, swap);
-		kref_get(&bo->list_kref);
-
-		if (!list_empty(&bo->ddestroy)) {
-			spin_unlock(&glob->lru_lock);
-			(void) ttm_bo_cleanup_refs(bo, false, false, false);
-			kref_put(&bo->list_kref, ttm_bo_release_list);
-			spin_lock(&glob->lru_lock);
-			continue;
-		}
-
-		/**
-		 * Reserve buffer. Since we unlock while sleeping, we need
-		 * to re-check that nobody removed us from the swap-list while
-		 * we slept.
-		 */
-
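+	/* as for eviction, take the first entry that trylock-reserves cleanly */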
+	list_for_each_entry(bo, &glob->swap_lru, swap) {
 		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-		if (unlikely(ret == -EBUSY)) {
-			spin_unlock(&glob->lru_lock);
-			ttm_bo_wait_unreserved(bo, false);
-			kref_put(&bo->list_kref, ttm_bo_release_list);
-			spin_lock(&glob->lru_lock);
-		}
+		if (!ret)
+			break;
+	}
+
+	if (ret) {
+		spin_unlock(&glob->lru_lock);
+		return ret;
+	}
+
+	kref_get(&bo->list_kref);
+
+	if (!list_empty(&bo->ddestroy)) {
+		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
+		kref_put(&bo->list_kref, ttm_bo_release_list);
+		return ret;
 	}
 
-	BUG_ON(ret != 0);
 	put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
 
@@ -1876,7 +1839,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 		evict_mem.mem_type = TTM_PL_SYSTEM;
 
 		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
-					     false, false, false);
+					     false, false);
 		if (unlikely(ret != 0))
 			goto out;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2026060f03e0..9e9c5d2a5c74 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -43,7 +43,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-		    bool evict, bool no_wait_reserve,
+		    bool evict,
 		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
 	struct ttm_tt *ttm = bo->ttm;
@@ -314,7 +314,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
+		       bool evict, bool no_wait_gpu,
 		       struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -611,8 +611,7 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
 
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      void *sync_obj,
-			      void *sync_obj_arg,
-			      bool evict, bool no_wait_reserve,
+			      bool evict,
 			      bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem)
 {
@@ -630,7 +629,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 		bo->sync_obj = NULL;
 	}
 	bo->sync_obj = driver->sync_obj_ref(sync_obj);
-	bo->sync_obj_arg = sync_obj_arg;
 	if (evict) {
 		ret = ttm_bo_wait(bo, false, false, false);
 		spin_unlock(&bdev->fence_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3ba72dbdc4bd..74705f329d99 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -259,8 +259,8 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 	read_lock(&bdev->vm_lock);
 	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
 				 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
-	if (likely(bo != NULL))
-		ttm_bo_reference(bo);
+	if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
+		bo = NULL;
 	read_unlock(&bdev->vm_lock);
 
 	if (unlikely(bo == NULL)) {
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 1937069432c5..cd9e4523dc56 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -185,10 +185,7 @@ retry_this_bo:
 			ttm_eu_backoff_reservation_locked(list);
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_wait_cpu(bo, false);
-			if (ret)
-				return ret;
-			goto retry;
+			return -EBUSY;
 		}
 	}
 
@@ -216,19 +213,18 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 	driver = bdev->driver;
 	glob = bo->glob;
 
-	spin_lock(&bdev->fence_lock);
 	spin_lock(&glob->lru_lock);
+	spin_lock(&bdev->fence_lock);
 
 	list_for_each_entry(entry, list, head) {
 		bo = entry->bo;
 		entry->old_sync_obj = bo->sync_obj;
 		bo->sync_obj = driver->sync_obj_ref(sync_obj);
-		bo->sync_obj_arg = entry->new_sync_obj_arg;
 		ttm_bo_unreserve_locked(bo);
 		entry->reserved = false;
 	}
-	spin_unlock(&glob->lru_lock);
 	spin_unlock(&bdev->fence_lock);
+	spin_unlock(&glob->lru_lock);
 
 	list_for_each_entry(entry, list, head) {
 		if (entry->old_sync_obj)
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 479c6b0467ca..dbc2def887cd 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -367,7 +367,6 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
 	spin_lock_init(&glob->lock);
 	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
 	INIT_WORK(&glob->work, ttm_shrink_work);
-	init_waitqueue_head(&glob->queue);
 	ret = kobject_init_and_add(
 		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
 	if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index c7857874956a..58a5f3261c0b 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -80,7 +80,7 @@ struct ttm_object_file {
  */
 
 struct ttm_object_device {
-	rwlock_t object_lock;
+	spinlock_t object_lock;
 	struct drm_open_hash object_hash;
 	atomic_t object_count;
 	struct ttm_mem_global *mem_glob;
@@ -157,12 +157,12 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
 	base->refcount_release = refcount_release;
 	base->ref_obj_release = ref_obj_release;
 	base->object_type = object_type;
-	write_lock(&tdev->object_lock);
 	kref_init(&base->refcount);
-	ret = drm_ht_just_insert_please(&tdev->object_hash,
-					&base->hash,
-					(unsigned long)base, 31, 0, 0);
-	write_unlock(&tdev->object_lock);
+	spin_lock(&tdev->object_lock);
+	ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
+					    &base->hash,
+					    (unsigned long)base, 31, 0, 0);
+	spin_unlock(&tdev->object_lock);
 	if (unlikely(ret != 0))
 		goto out_err0;
 
@@ -174,7 +174,9 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
 
 	return 0;
 out_err1:
-	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+	spin_lock(&tdev->object_lock);
+	(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+	spin_unlock(&tdev->object_lock);
 out_err0:
 	return ret;
 }
@@ -186,30 +188,29 @@ static void ttm_release_base(struct kref *kref)
 	    container_of(kref, struct ttm_base_object, refcount);
 	struct ttm_object_device *tdev = base->tfile->tdev;
 
-	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
-	write_unlock(&tdev->object_lock);
+	spin_lock(&tdev->object_lock);
+	(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+	spin_unlock(&tdev->object_lock);
+
+	/*
+	 * Note: We don't use synchronize_rcu() here because it's far
+	 * too slow. It's up to the user to free the object using
+	 * call_rcu() or ttm_base_object_kfree().
+	 */
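+	/*
+	 * The lookup side (ttm_base_object_lookup() below) pairs with this
+	 * by taking rcu_read_lock() and using kref_get_unless_zero(), so a
+	 * lookup racing with this release fails rather than resurrecting
+	 * the object.
+	 */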
+
 	if (base->refcount_release) {
 		ttm_object_file_unref(&base->tfile);
 		base->refcount_release(&base);
 	}
-	write_lock(&tdev->object_lock);
 }
 
 void ttm_base_object_unref(struct ttm_base_object **p_base)
 {
 	struct ttm_base_object *base = *p_base;
-	struct ttm_object_device *tdev = base->tfile->tdev;
 
 	*p_base = NULL;
 
-	/*
-	 * Need to take the lock here to avoid racing with
-	 * users trying to look up the object.
-	 */
-
-	write_lock(&tdev->object_lock);
 	kref_put(&base->refcount, ttm_release_base);
-	write_unlock(&tdev->object_lock);
 }
 EXPORT_SYMBOL(ttm_base_object_unref);
 
@@ -221,14 +222,14 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
 	struct drm_hash_item *hash;
 	int ret;
 
-	read_lock(&tdev->object_lock);
-	ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
+	rcu_read_lock();
+	ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
 
 	if (likely(ret == 0)) {
 		base = drm_hash_entry(hash, struct ttm_base_object, hash);
-		kref_get(&base->refcount);
+		ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
 	}
-	read_unlock(&tdev->object_lock);
+	rcu_read_unlock();
 
 	if (unlikely(ret != 0))
 		return NULL;
@@ -426,7 +427,7 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
 		return NULL;
 
 	tdev->mem_glob = mem_glob;
-	rwlock_init(&tdev->object_lock);
+	spin_lock_init(&tdev->object_lock);
 	atomic_set(&tdev->object_count, 0);
 	ret = drm_ht_create(&tdev->object_hash, hash_order);
 
@@ -444,9 +445,9 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
 
 	*p_tdev = NULL;
 
-	write_lock(&tdev->object_lock);
+	spin_lock(&tdev->object_lock);
 	drm_ht_remove(&tdev->object_hash);
-	write_unlock(&tdev->object_lock);
+	spin_unlock(&tdev->object_lock);
 
 	kfree(tdev);
 }
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index b3b2cedf6745..512f44add89f 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -84,7 +84,8 @@ udl_detect(struct drm_connector *connector, bool force)
 	return connector_status_connected;
 }
 
-struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector)
+static struct drm_encoder*
+udl_best_single_encoder(struct drm_connector *connector)
 {
 	int enc_id = connector->encoder_ids[0];
 	struct drm_mode_object *obj;
@@ -97,8 +98,9 @@ struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector)
 	return encoder;
 }
 
-int udl_connector_set_property(struct drm_connector *connector, struct drm_property *property,
-			       uint64_t val)
+static int udl_connector_set_property(struct drm_connector *connector,
+				      struct drm_property *property,
+				      uint64_t val)
 {
 	return 0;
 }
@@ -110,13 +112,13 @@ static void udl_connector_destroy(struct drm_connector *connector)
 	kfree(connector);
 }
 
-struct drm_connector_helper_funcs udl_connector_helper_funcs = {
+static struct drm_connector_helper_funcs udl_connector_helper_funcs = {
 	.get_modes = udl_get_modes,
 	.mode_valid = udl_mode_valid,
 	.best_encoder = udl_best_single_encoder,
 };
 
-struct drm_connector_funcs udl_connector_funcs = {
+static struct drm_connector_funcs udl_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.detect = udl_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -138,7 +140,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
 	drm_sysfs_connector_add(connector);
 	drm_mode_connector_attach_encoder(connector, encoder);
 
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				      dev->mode_config.dirty_info_property,
 				      1);
 	return 0;
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 586869c8c11f..2cc6cd91ac11 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -5,6 +5,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
 	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
 	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
-	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o
+	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+	    vmwgfx_surface.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
new file mode 100644
index 000000000000..8369c3ba10fe
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
@@ -0,0 +1,909 @@
+/**************************************************************************
+ *
+ * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifdef __KERNEL__
+
+#include <drm/vmwgfx_drm.h>
+#define surf_size_struct struct drm_vmw_size
+
+#else /* __KERNEL__ */
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
+#endif /* ARRAY_SIZE */
+
+#define DIV_ROUND_UP(x, y)  (((x) + (y) - 1) / (y))
+#define max_t(type, x, y)  ((x) > (y) ? (x) : (y))
+#define surf_size_struct SVGA3dSize
+#define u32 uint32
+
+#endif /* __KERNEL__ */
+
+#include "svga3d_reg.h"
+
+/*
+ * enum svga3d_block_desc describes the active data channels in a block.
+ *
+ * There can be at most four active channels in a block:
+ *    1. Blue and bump U are stored in the first channel.
+ *    2. Green, bump V and stencil are stored in the second channel.
+ *    3. Red, bump W, luminance and depth are stored in the third channel.
+ *    4. Alpha and bump Q are stored in the fourth channel.
+ *
+ * Block channels can be used to store compressed and buffer data:
+ *    1. For compressed formats, only the data channel is used and its size
+ *       is equal to that of a singular block in the compression scheme.
+ *    2. For buffer formats, only the data channel is used and its size is
+ *       exactly one byte in length.
+ *    3. In each case the bit depth represents the size of a singular block.
+ *
+ * Note: Compressed and IEEE formats do not use the bitMask structure.
+ */
+
+enum svga3d_block_desc {
+	SVGA3DBLOCKDESC_NONE        = 0,         /* No channels are active */
+	SVGA3DBLOCKDESC_BLUE        = 1 << 0,    /* Block with blue channel
+						    data */
+	SVGA3DBLOCKDESC_U           = 1 << 0,    /* Block with bump U channel
+						    data */
+	SVGA3DBLOCKDESC_UV_VIDEO    = 1 << 7,    /* Block with alternating video
+						    U and V */
+	SVGA3DBLOCKDESC_GREEN       = 1 << 1,    /* Block with green channel
+						    data */
+	SVGA3DBLOCKDESC_V           = 1 << 1,    /* Block with bump V channel
+						    data */
+	SVGA3DBLOCKDESC_STENCIL     = 1 << 1,    /* Block with a stencil
+						    channel */
+	SVGA3DBLOCKDESC_RED         = 1 << 2,    /* Block with red channel
+						    data */
+	SVGA3DBLOCKDESC_W           = 1 << 2,    /* Block with bump W channel
+						    data */
+	SVGA3DBLOCKDESC_LUMINANCE   = 1 << 2,    /* Block with luminance channel
+						    data */
+	SVGA3DBLOCKDESC_Y           = 1 << 2,    /* Block with video luminance
+						    data */
+	SVGA3DBLOCKDESC_DEPTH       = 1 << 2,    /* Block with depth channel */
+	SVGA3DBLOCKDESC_ALPHA       = 1 << 3,    /* Block with an alpha
+						    channel */
+	SVGA3DBLOCKDESC_Q           = 1 << 3,    /* Block with bump Q channel
+						    data */
+	SVGA3DBLOCKDESC_BUFFER      = 1 << 4,    /* Block stores 1 byte of
+						    data */
+	SVGA3DBLOCKDESC_COMPRESSED  = 1 << 5,    /* Block stores n bytes of
+						    data depending on the
+						    compression method used */
+	SVGA3DBLOCKDESC_IEEE_FP     = 1 << 6,    /* Block stores data in an IEEE
+						    floating point
+						    representation in
+						    all channels */
+	SVGA3DBLOCKDESC_PLANAR_YUV  = 1 << 8,    /* Three separate blocks store
+						    data. */
+	SVGA3DBLOCKDESC_U_VIDEO     = 1 << 9,    /* Block with U video data */
+	SVGA3DBLOCKDESC_V_VIDEO     = 1 << 10,   /* Block with V video data */
+	SVGA3DBLOCKDESC_EXP         = 1 << 11,   /* Shared exponent */
+	SVGA3DBLOCKDESC_SRGB        = 1 << 12,   /* Data is in sRGB format */
+	SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13,   /* 2 planes of Y, UV,
+						    e.g., NV12. */
+	SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14,   /* 3 planes of separate
+						    Y, U, V, e.g., YV12. */
+
+	SVGA3DBLOCKDESC_RG         = SVGA3DBLOCKDESC_RED |
+	SVGA3DBLOCKDESC_GREEN,
+	SVGA3DBLOCKDESC_RGB        = SVGA3DBLOCKDESC_RG |
+	SVGA3DBLOCKDESC_BLUE,
+	SVGA3DBLOCKDESC_RGB_SRGB   = SVGA3DBLOCKDESC_RGB |
+	SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_RGBA       = SVGA3DBLOCKDESC_RGB |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_RGBA_SRGB  = SVGA3DBLOCKDESC_RGBA |
+	SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_UV         = SVGA3DBLOCKDESC_U |
+	SVGA3DBLOCKDESC_V,
+	SVGA3DBLOCKDESC_UVL        = SVGA3DBLOCKDESC_UV |
+	SVGA3DBLOCKDESC_LUMINANCE,
+	SVGA3DBLOCKDESC_UVW        = SVGA3DBLOCKDESC_UV |
+	SVGA3DBLOCKDESC_W,
+	SVGA3DBLOCKDESC_UVWA       = SVGA3DBLOCKDESC_UVW |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_UVWQ       = SVGA3DBLOCKDESC_U |
+	SVGA3DBLOCKDESC_V |
+	SVGA3DBLOCKDESC_W |
+	SVGA3DBLOCKDESC_Q,
+	SVGA3DBLOCKDESC_LA         = SVGA3DBLOCKDESC_LUMINANCE |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_R_FP       = SVGA3DBLOCKDESC_RED |
+	SVGA3DBLOCKDESC_IEEE_FP,
+	SVGA3DBLOCKDESC_RG_FP      = SVGA3DBLOCKDESC_R_FP |
+	SVGA3DBLOCKDESC_GREEN,
+	SVGA3DBLOCKDESC_RGB_FP     = SVGA3DBLOCKDESC_RG_FP |
+	SVGA3DBLOCKDESC_BLUE,
+	SVGA3DBLOCKDESC_RGBA_FP    = SVGA3DBLOCKDESC_RGB_FP |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_DS         = SVGA3DBLOCKDESC_DEPTH |
+	SVGA3DBLOCKDESC_STENCIL,
+	SVGA3DBLOCKDESC_YUV        = SVGA3DBLOCKDESC_UV_VIDEO |
+	SVGA3DBLOCKDESC_Y,
+	SVGA3DBLOCKDESC_AYUV       = SVGA3DBLOCKDESC_ALPHA |
+	SVGA3DBLOCKDESC_Y |
+	SVGA3DBLOCKDESC_U_VIDEO |
+	SVGA3DBLOCKDESC_V_VIDEO,
+	SVGA3DBLOCKDESC_RGBE       = SVGA3DBLOCKDESC_RGB |
+	SVGA3DBLOCKDESC_EXP,
+	SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
+	SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_NV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
+	SVGA3DBLOCKDESC_2PLANAR_YUV,
+	SVGA3DBLOCKDESC_YV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
+	SVGA3DBLOCKDESC_3PLANAR_YUV,
+};
+
+/*
+ * SVGA3dSurfaceDesc describes the actual pixel data.
+ *
+ * This structure provides the following information:
+ *    1. Block description.
+ *    2. Dimensions of a block in the surface.
+ *    3. Size of block in bytes.
+ *    4. Bit depth of the pixel data.
+ *    5. Channel bit depths and masks (if applicable).
+ */
+#define SVGA3D_CHANNEL_DEF(type)		\
+	struct {				\
+		union {				\
+			type blue;              \
+			type u;                 \
+			type uv_video;          \
+			type u_video;           \
+		};				\
+		union {				\
+			type green;             \
+			type v;                 \
+			type stencil;           \
+			type v_video;           \
+		};				\
+		union {				\
+			type red;               \
+			type w;                 \
+			type luminance;         \
+			type y;                 \
+			type depth;             \
+			type data;              \
+		};				\
+		union {				\
+			type alpha;             \
+			type q;                 \
+			type exp;               \
+		};				\
+	}
+
+struct svga3d_surface_desc {
+	enum svga3d_block_desc block_desc;
+	surf_size_struct block_size;
+	u32 bytes_per_block;
+	u32 pitch_bytes_per_block;
+
+	struct {
+		u32 total;
+		SVGA3D_CHANNEL_DEF(uint8);
+	} bit_depth;
+
+	struct {
+		SVGA3D_CHANNEL_DEF(uint8);
+	} bit_offset;
+};
+
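+/*
+ * Illustrative reading of one entry below: the SVGA3D_A8R8G8B8 line is
+ * SVGA3DBLOCKDESC_RGBA with 1x1x1 blocks of 4 bytes, a 32-bit total
+ * depth split as 8 bits per channel, and channel bit offsets blue = 0,
+ * green = 8, red = 16, alpha = 24.
+ */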
+static const struct svga3d_surface_desc svga3d_surface_descs[] = {
+	{SVGA3DBLOCKDESC_NONE,
+	 {1, 1, 1},  0, 0, {0, {{0}, {0}, {0}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_FORMAT_INVALID */
+
+	{SVGA3DBLOCKDESC_RGB,
+	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
+	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_X8R8G8B8 */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_A8R8G8B8 */
+
+	{SVGA3DBLOCKDESC_RGB,
+	 {1, 1, 1},  2, 2, {16, {{5}, {6}, {5}, {0} } },
+	 {{{0}, {5}, {11}, {0} } } },    /* SVGA3D_R5G6B5 */
+
+	{SVGA3DBLOCKDESC_RGB,
+	 {1, 1, 1},  2, 2, {15, {{5}, {5}, {5}, {0} } },
+	 {{{0}, {5}, {10}, {0} } } },    /* SVGA3D_X1R5G5B5 */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  2, 2, {16, {{5}, {5}, {5}, {1} } },
+	 {{{0}, {5}, {10}, {15} } } },   /* SVGA3D_A1R5G5B5 */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  2, 2, {16, {{4}, {4}, {4}, {4} } },
+	 {{{0}, {4}, {8}, {12} } } },    /* SVGA3D_A4R4G4B4 */
+
+	{SVGA3DBLOCKDESC_DEPTH,
+	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_D32 */
+
+	{SVGA3DBLOCKDESC_DEPTH,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_D16 */
+
+	{SVGA3DBLOCKDESC_DS,
+	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24S8 */
+
+	{SVGA3DBLOCKDESC_DS,
+	 {1, 1, 1},  2, 2, {16, {{0}, {1}, {15}, {0} } },
+	 {{{0}, {15}, {0}, {0} } } },    /* SVGA3D_Z_D15S1 */
+
+	{SVGA3DBLOCKDESC_LUMINANCE,
+	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_LUMINANCE8 */
+
+	{SVGA3DBLOCKDESC_LA,
+	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {4}, {4} } },
+	 {{{0}, {0}, {0}, {4} } } },     /* SVGA3D_LUMINANCE4_ALPHA4 */
+
+	{SVGA3DBLOCKDESC_LUMINANCE,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_LUMINANCE16 */
+
+	{SVGA3DBLOCKDESC_LA,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {8}, {8} } },
+	 {{{0}, {0}, {0}, {8} } } },     /* SVGA3D_LUMINANCE8_ALPHA8 */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT1 */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT2 */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT3 */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT4 */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT5 */
+
+	{SVGA3DBLOCKDESC_UV,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {8}, {8} } },
+	 {{{0}, {0}, {0}, {8} } } },     /* SVGA3D_BUMPU8V8 */
+
+	{SVGA3DBLOCKDESC_UVL,
+	 {1, 1, 1},  2, 2, {16, {{5}, {5}, {6}, {0} } },
+	 {{{11}, {6}, {0}, {0} } } },    /* SVGA3D_BUMPL6V5U5 */
+
+	{SVGA3DBLOCKDESC_UVL,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {0} } },
+	 {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_BUMPX8L8V8U8 */
+
+	{SVGA3DBLOCKDESC_UVL,
+	 {1, 1, 1},  3, 3, {24, {{8}, {8}, {8}, {0} } },
+	 {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_BUMPL8V8U8 */
+
+	{SVGA3DBLOCKDESC_RGBA_FP,
+	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_ARGB_S10E5 */
+
+	{SVGA3DBLOCKDESC_RGBA_FP,
+	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
+	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_ARGB_S23E8 */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_A2R10G10B10 */
+
+	{SVGA3DBLOCKDESC_UV,
+	 {1, 1, 1},  2, 2, {16, {{8}, {8}, {0}, {0} } },
+	 {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_V8U8 */
+
+	{SVGA3DBLOCKDESC_UVWQ,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{24}, {16}, {8}, {0} } } },   /* SVGA3D_Q8W8V8U8 */
+
+	{SVGA3DBLOCKDESC_UV,
+	 {1, 1, 1},  2, 2, {16, {{8}, {8}, {0}, {0} } },
+	 {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_CxV8U8 */
+
+	{SVGA3DBLOCKDESC_UVL,
+	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
+	 {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_X8L8V8U8 */
+
+	{SVGA3DBLOCKDESC_UVWA,
+	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_A2W10V10U10 */
+
+	{SVGA3DBLOCKDESC_ALPHA,
+	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {0}, {8} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_ALPHA8 */
+
+	{SVGA3DBLOCKDESC_R_FP,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R_S10E5 */
+
+	{SVGA3DBLOCKDESC_R_FP,
+	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R_S23E8 */
+
+	{SVGA3DBLOCKDESC_RG_FP,
+	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_RG_S10E5 */
+
+	{SVGA3DBLOCKDESC_RG_FP,
+	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
+	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_RG_S23E8 */
+
+	{SVGA3DBLOCKDESC_BUFFER,
+	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BUFFER */
+
+	{SVGA3DBLOCKDESC_DEPTH,
+	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {24}, {0} } },
+	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24X8 */
+
+	{SVGA3DBLOCKDESC_UV,
+	 {1, 1, 1},  4, 4, {32, {{16}, {16}, {0}, {0} } },
+	 {{{16}, {0}, {0}, {0} } } },    /* SVGA3D_V16U16 */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+	 {{{0}, {0}, {16}, {0} } } },    /* SVGA3D_G16R16 */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_A16B16G16R16 */
+
+	{SVGA3DBLOCKDESC_YUV,
+	 {1, 1, 1},  2, 2, {16, {{8}, {0}, {8}, {0} } },
+	 {{{0}, {0}, {8}, {0} } } },     /* SVGA3D_UYVY */
+
+	{SVGA3DBLOCKDESC_YUV,
+	 {1, 1, 1},  2, 2, {16, {{8}, {0}, {8}, {0} } },
+	 {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_YUY2 */
+
+	{SVGA3DBLOCKDESC_NV12,
+	 {2, 2, 1},  6, 2, {48, {{0}, {0}, {48}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_NV12 */
+
+	{SVGA3DBLOCKDESC_AYUV,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_AYUV */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
+	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
+	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_UINT */
+
+	{SVGA3DBLOCKDESC_UVWQ,
+	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
+	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_SINT */
+
+	{SVGA3DBLOCKDESC_RGB,
+	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
+	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RGB_FP,
+	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
+	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_FLOAT */
+
+	{SVGA3DBLOCKDESC_RGB,
+	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
+	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_UINT */
+
+	{SVGA3DBLOCKDESC_UVW,
+	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
+	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_SINT */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_UINT */
+
+	{SVGA3DBLOCKDESC_UVWQ,
+	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_SNORM */
+
+	{SVGA3DBLOCKDESC_UVWQ,
+	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_SINT */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
+	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
+	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_UINT */
+
+	{SVGA3DBLOCKDESC_UV,
+	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
+	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_SINT */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  8, 8, {64, {{0}, {8}, {32}, {0} } },
+	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G8X24_TYPELESS */
+
+	{SVGA3DBLOCKDESC_DS,
+	 {1, 1, 1},  8, 8, {64, {{0}, {8}, {32}, {0} } },
+	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_D32_FLOAT_S8X24_UINT */
+
+	{SVGA3DBLOCKDESC_R_FP,
+	 {1, 1, 1},  8, 8, {64, {{0}, {0}, {32}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },    /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
+
+	{SVGA3DBLOCKDESC_GREEN,
+	 {1, 1, 1},  8, 8, {64, {{0}, {8}, {0}, {0} } },
+	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_X32_TYPELESS_G8X24_UINT */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10A2_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10A2_UINT */
+
+	{SVGA3DBLOCKDESC_RGB_FP,
+	 {1, 1, 1},  4, 4, {32, {{10}, {11}, {11}, {0} } },
+	 {{{0}, {10}, {21}, {0} } } },  /* SVGA3D_R11G11B10_FLOAT */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UNORM */
+
+	{SVGA3DBLOCKDESC_RGBA_SRGB,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UNORM_SRGB */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UINT */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_SINT */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RG_FP,
+	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_UINT */
+
+	{SVGA3DBLOCKDESC_UV,
+	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_SINT */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_TYPELESS */
+
+	{SVGA3DBLOCKDESC_DEPTH,
+	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_D32_FLOAT */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_UINT */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_SINT */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_R24G8_TYPELESS */
+
+	{SVGA3DBLOCKDESC_DS,
+	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_D24_UNORM_S8_UINT */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {24}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R24_UNORM_X8_TYPELESS */
+
+	{SVGA3DBLOCKDESC_GREEN,
+	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {0}, {0} } },
+	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_X24_TYPELESS_G8_UINT */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_UNORM */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_UINT */
+
+	{SVGA3DBLOCKDESC_UV,
+	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_SINT */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_UNORM */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_UINT */
+
+	{SVGA3DBLOCKDESC_U,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_SNORM */
+
+	{SVGA3DBLOCKDESC_U,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_SINT */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_UNORM */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_UINT */
+
+	{SVGA3DBLOCKDESC_U,
+	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_SNORM */
+
+	{SVGA3DBLOCKDESC_U,
+	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_SINT */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {8, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R1_UNORM */
+
+	{SVGA3DBLOCKDESC_RGBE,
+	 {1, 1, 1},  4, 4, {32, {{9}, {9}, {9}, {5} } },
+	 {{{18}, {9}, {0}, {27} } } },   /* SVGA3D_R9G9B9E5_SHAREDEXP */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_B8G8_UNORM */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_G8R8_G8B8_UNORM */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC1_TYPELESS */
+
+	{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC1_UNORM_SRGB */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC2_TYPELESS */
+
+	{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC2_UNORM_SRGB */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC3_TYPELESS */
+
+	{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC3_UNORM_SRGB */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_TYPELESS */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_UNORM */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_SNORM */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_TYPELESS */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_UNORM */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_SNORM */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8A8_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RGBA_SRGB,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8A8_UNORM_SRGB */
+
+	{SVGA3DBLOCKDESC_RGB,
+	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
+	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8X8_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RGB_SRGB,
+	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
+	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8X8_UNORM_SRGB */
+
+	{SVGA3DBLOCKDESC_DEPTH,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_DF16 */
+
+	{SVGA3DBLOCKDESC_DS,
+	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_DF24 */
+
+	{SVGA3DBLOCKDESC_DS,
+	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24S8_INT */
+};
+
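+/*
+ * Saturating 32-bit multiply: the product is computed in 64 bits and
+ * clamped to 0xffffffff on overflow, so subsequent size calculations
+ * saturate rather than wrap.
+ */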
+static inline u32 clamped_umul32(u32 a, u32 b)
+{
+	uint64_t tmp = (uint64_t) a*b;
+	return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+}
+
+static inline const struct svga3d_surface_desc *
+svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
+{
+	if (format < ARRAY_SIZE(svga3d_surface_descs))
+		return &svga3d_surface_descs[format];
+
+	return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * svga3dsurface_get_mip_size --
+ *
+ *      Given a base level size and the mip level, compute the size of
+ *      the mip level.
+ *
+ * Results:
+ *      See above.
+ *
+ * Side effects:
+ *      None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline surf_size_struct
+svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
+{
+	surf_size_struct size;
+
+	size.width = max_t(u32, base_level.width >> mip_level, 1);
+	size.height = max_t(u32, base_level.height >> mip_level, 1);
+	size.depth = max_t(u32, base_level.depth >> mip_level, 1);
+	return size;
+}
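+
+/*
+ * Example (illustrative): svga3dsurface_get_mip_size() shifts each
+ * dimension right by the mip level and clamps to a minimum of 1, so a
+ * 256x128x1 base level yields 32x16x1 at mip level 3.
+ */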
+
+static inline void
+svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
+				 const surf_size_struct *pixel_size,
+				 surf_size_struct *block_size)
+{
+	block_size->width = DIV_ROUND_UP(pixel_size->width,
+					 desc->block_size.width);
+	block_size->height = DIV_ROUND_UP(pixel_size->height,
+					  desc->block_size.height);
+	block_size->depth = DIV_ROUND_UP(pixel_size->depth,
+					 desc->block_size.depth);
+}
+
+static inline bool
+svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
+{
+	return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
+}
+
+static inline u32
+svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
+			      const surf_size_struct *size)
+{
+	u32 pitch;
+	surf_size_struct blocks;
+
+	svga3dsurface_get_size_in_blocks(desc, size, &blocks);
+
+	pitch = blocks.width * desc->pitch_bytes_per_block;
+
+	return pitch;
+}
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * svga3dsurface_get_image_buffer_size --
+ *
+ *      Return the number of bytes of buffer space required to store
+ *      one image of a surface, optionally using the specified pitch.
+ *
+ *      If pitch is zero, it is assumed that rows are tightly packed.
+ *
+ *      This function is overflow-safe. If the result would have
+ *      overflowed, it returns MAX_UINT32 instead.
+ *
+ * Results:
+ *      Byte count.
+ *
+ * Side effects:
+ *      None.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static inline u32
+svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
+				    const surf_size_struct *size,
+				    u32 pitch)
+{
+	surf_size_struct image_blocks;
+	u32 slice_size, total_size;
+
+	svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
+
+	if (svga3dsurface_is_planar_surface(desc)) {
+		total_size = clamped_umul32(image_blocks.width,
+					    image_blocks.height);
+		total_size = clamped_umul32(total_size, image_blocks.depth);
+		total_size = clamped_umul32(total_size, desc->bytes_per_block);
+		return total_size;
+	}
+
+	if (pitch == 0)
+		pitch = svga3dsurface_calculate_pitch(desc, size);
+
+	slice_size = clamped_umul32(image_blocks.height, pitch);
+	total_size = clamped_umul32(slice_size, image_blocks.depth);
+
+	return total_size;
+}
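+
+/*
+ * Example (illustrative): for a 256x256x1 SVGA3D_A8R8G8B8 image,
+ * svga3dsurface_get_image_buffer_size() with pitch == 0 uses blocks of
+ * 1x1x1 pixels and 4 bytes, giving a pitch of 256 * 4 = 1024 bytes and
+ * a buffer size of 256 * 1024 = 262144 bytes.
+ */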
+
+static inline u32
+svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
+				  surf_size_struct base_level_size,
+				  u32 num_mip_levels,
+				  bool cubemap)
+{
+	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+	u32 total_size = 0;
+	u32 mip;
+
+	for (mip = 0; mip < num_mip_levels; mip++) {
+		surf_size_struct size =
+			svga3dsurface_get_mip_size(base_level_size, mip);
+		total_size += svga3dsurface_get_image_buffer_size(desc,
+								  &size, 0);
+	}
+
+	if (cubemap)
+		total_size *= SVGA3D_MAX_SURFACE_FACES;
+
+	return total_size;
+}
+
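+/*
+ * Example (illustrative): a 64x64 SVGA3D_A8R8G8B8 surface with a full
+ * 7-level mip chain serializes to 4 * (64*64 + 32*32 + ... + 1*1) =
+ * 21844 bytes per face, times SVGA3D_MAX_SURFACE_FACES when cubemap is
+ * true.
+ */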
+
+/**
+ * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
+ * in an image (or volume).
+ *
+ * @format: The surface format.
+ * @width: The image width in pixels.
+ * @height: The image height in pixels.
+ * @x: X coordinate of the pixel.
+ * @y: Y coordinate of the pixel.
+ * @z: Z coordinate (slice) of the pixel.
+ */
+static inline u32
+svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
+			       u32 width, u32 height,
+			       u32 x, u32 y, u32 z)
+{
+	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+	const u32 bw = desc->block_size.width, bh = desc->block_size.height;
+	const u32 bd = desc->block_size.depth;
+	const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
+	const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
+	const u32 offset = (z / bd * imgstride +
+			    y / bh * rowstride +
+			    x / bw * desc->bytes_per_block);
+	return offset;
+}
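+
+/*
+ * Example (illustrative): for SVGA3D_DXT1 (4x4 blocks, 8 bytes per
+ * block) in a 16-pixel-wide image, rowstride is (16 / 4) * 8 = 32
+ * bytes, so pixel (5, 6, 0) lands in block (1, 1) at byte offset
+ * 1 * 32 + 1 * 8 = 40.
+ */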
+
+
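+/*
+ * Each face stores its full mip chain contiguously, so the offset of
+ * (face, mip) is face * mip_chain_bytes plus the sizes of the levels
+ * preceding that mip. Illustratively, for the 64x64 A8R8G8B8 cubemap
+ * above (21844 bytes per chain), face 2 / mip 1 starts at
+ * 2 * 21844 + 64 * 64 * 4 = 60072 bytes.
+ */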
+static inline u32
+svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
+			       surf_size_struct baseLevelSize,
+			       u32 numMipLevels,
+			       u32 face,
+			       u32 mip)
+
+{
+	u32 offset;
+	u32 mipChainBytes;
+	u32 mipChainBytesToLevel;
+	u32 i;
+	const struct svga3d_surface_desc *desc;
+	surf_size_struct mipSize;
+	u32 bytes;
+
+	desc = svga3dsurface_get_desc(format);
+
+	mipChainBytes = 0;
+	mipChainBytesToLevel = 0;
+	for (i = 0; i < numMipLevels; i++) {
+		mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
+		bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
+		mipChainBytes += bytes;
+		if (i < mip)
+			mipChainBytesToLevel += bytes;
+	}
+
+	offset = mipChainBytes * face + mipChainBytesToLevel;
+
+	return offset;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 9826fbc88154..96dc84dc34d0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -248,13 +248,12 @@ void vmw_evict_flags(struct ttm_buffer_object *bo,
 	*placement = vmw_sys_placement;
 }
 
-/**
- * FIXME: Proper access checks on buffers.
- */
-
 static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
-	return 0;
+	struct ttm_object_file *tfile =
+		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
+
+	return vmw_user_dmabuf_verify_access(bo, tfile);
 }
 
 static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
@@ -310,27 +309,23 @@ static void vmw_sync_obj_unref(void **sync_obj)
 	vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
 }
 
-static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
+static int vmw_sync_obj_flush(void *sync_obj)
 {
 	vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
 	return 0;
 }
 
-static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
+static bool vmw_sync_obj_signaled(void *sync_obj)
 {
-	unsigned long flags = (unsigned long) sync_arg;
 	return	vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
-				       (uint32_t) flags);
+				       DRM_VMW_FENCE_FLAG_EXEC);
 
 }
 
-static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
-			     bool lazy, bool interruptible)
+static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
 {
-	unsigned long flags = (unsigned long) sync_arg;
-
 	return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
-				  (uint32_t) flags,
+				  DRM_VMW_FENCE_FLAG_EXEC,
 				  lazy, interruptible,
 				  VMW_FENCE_WAIT_TIMEOUT);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
new file mode 100644
index 000000000000..00ae0925aca8
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -0,0 +1,274 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "ttm/ttm_placement.h"
+
+struct vmw_user_context {
+	struct ttm_base_object base;
+	struct vmw_resource res;
+};
+
+static void vmw_user_context_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base);
+
+static uint64_t vmw_user_context_size;
+
+static const struct vmw_user_resource_conv user_context_conv = {
+	.object_type = VMW_RES_CONTEXT,
+	.base_obj_to_res = vmw_user_context_base_to_res,
+	.res_free = vmw_user_context_free
+};
+
+const struct vmw_user_resource_conv *user_context_converter =
+	&user_context_conv;
+
+
+static const struct vmw_res_func vmw_legacy_context_func = {
+	.res_type = vmw_res_context,
+	.needs_backup = false,
+	.may_evict = false,
+	.type_name = "legacy contexts",
+	.backup_placement = NULL,
+	.create = NULL,
+	.destroy = NULL,
+	.bind = NULL,
+	.unbind = NULL
+};
+
+/**
+ * Context management:
+ */
+
+static void vmw_hw_context_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyContext body;
+	} *cmd;
+
+	vmw_execbuf_release_pinned_bo(dev_priv);
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "destruction.\n");
+		return;
+	}
+
+	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
+	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+	cmd->body.cid = cpu_to_le32(res->id);
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_3d_resource_dec(dev_priv, false);
+}
+
+static int vmw_context_init(struct vmw_private *dev_priv,
+			    struct vmw_resource *res,
+			    void (*res_free) (struct vmw_resource *res))
+{
+	int ret;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineContext body;
+	} *cmd;
+
+	ret = vmw_resource_init(dev_priv, res, false,
+				res_free, &vmw_legacy_context_func);
+
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a resource id.\n");
+		goto out_early;
+	}
+
+	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
+		DRM_ERROR("Out of hw context ids.\n");
+		vmw_resource_unreference(&res);
+		return -ENOMEM;
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Fifo reserve failed.\n");
+		vmw_resource_unreference(&res);
+		return -ENOMEM;
+	}
+
+	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
+	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+	cmd->body.cid = cpu_to_le32(res->id);
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_resource_activate(res, vmw_hw_context_destroy);
+	return 0;
+
+out_early:
+	if (res_free == NULL)
+		kfree(res);
+	else
+		res_free(res);
+	return ret;
+}
+
+struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
+{
+	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
+	int ret;
+
+	if (unlikely(res == NULL))
+		return NULL;
+
+	ret = vmw_context_init(dev_priv, res, NULL);
+
+	return (ret == 0) ? res : NULL;
+}
+
+/**
+ * User-space context management:
+ */
+
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base)
+{
+	return &(container_of(base, struct vmw_user_context, base)->res);
+}
+
+static void vmw_user_context_free(struct vmw_resource *res)
+{
+	struct vmw_user_context *ctx =
+	    container_of(res, struct vmw_user_context, res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	ttm_base_object_kfree(ctx, base);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv),
+			    vmw_user_context_size);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_context_base_release(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct vmw_user_context *ctx =
+	    container_of(base, struct vmw_user_context, base);
+	struct vmw_resource *res = &ctx->res;
+
+	*p_base = NULL;
+	vmw_resource_unreference(&res);
+}
+
+int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
+}
+
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_user_context *ctx;
+	struct vmw_resource *res;
+	struct vmw_resource *tmp;
+	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	int ret;
+
+	/*
+	 * Approximate idr memory usage with 128 bytes. It will be limited
+	 * by the maximum number of contexts anyway.
+	 */
+
+	if (unlikely(vmw_user_context_size == 0))
+		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   vmw_user_context_size,
+				   false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for context"
+				  " creation.\n");
+		goto out_unlock;
+	}
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (unlikely(ctx == NULL)) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_user_context_size);
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	res = &ctx->res;
+	ctx->base.shareable = false;
+	ctx->base.tfile = NULL;
+
+	/*
+	 * From here on, the destructor takes over resource freeing.
+	 */
+
+	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+	if (unlikely(ret != 0))
+		goto out_unlock;
+
+	tmp = vmw_resource_reference(&ctx->res);
+	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
+				   &vmw_user_context_base_release, NULL);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&tmp);
+		goto out_err;
+	}
+
+	arg->cid = ctx->base.hash.key;
+out_err:
+	vmw_resource_unreference(&res);
+out_unlock:
+	ttm_read_unlock(&vmaster->lock);
+	return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index d1498bfd7873..5fae06ad7e25 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -60,13 +60,13 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+	vmw_execbuf_release_pinned_bo(dev_priv);
 
 	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
 	if (unlikely(ret != 0))
 		goto err;
 
-	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+	ret = ttm_bo_validate(bo, placement, interruptible, false);
 
 	ttm_bo_unreserve(bo);
 
@@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
 		return ret;
 
 	if (pin)
-		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+		vmw_execbuf_release_pinned_bo(dev_priv);
 
 	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
 	if (unlikely(ret != 0))
@@ -123,7 +123,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
 	else
 		placement = &vmw_vram_gmr_placement;
 
-	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+	ret = ttm_bo_validate(bo, placement, interruptible, false);
 	if (likely(ret == 0) || ret == -ERESTARTSYS)
 		goto err_unreserve;
 
@@ -138,7 +138,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
 	else
 		placement = &vmw_vram_placement;
 
-	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+	ret = ttm_bo_validate(bo, placement, interruptible, false);
 
 err_unreserve:
 	ttm_bo_unreserve(bo);
@@ -214,8 +214,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
 		return ret;
 
 	if (pin)
-		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
-
+		vmw_execbuf_release_pinned_bo(dev_priv);
 	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
 	if (unlikely(ret != 0))
 		goto err_unlock;
@@ -224,10 +223,9 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
 	if (bo->mem.mem_type == TTM_PL_VRAM &&
 	    bo->mem.start < bo->num_pages &&
 	    bo->mem.start > 0)
-		(void) ttm_bo_validate(bo, &vmw_sys_placement, false,
-				       false, false);
+		(void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
 
-	ret = ttm_bo_validate(bo, &placement, interruptible, false, false);
+	ret = ttm_bo_validate(bo, &placement, interruptible, false);
 
 	/* For some reason we didn't end up at the start of vram */
 	WARN_ON(ret == 0 && bo->offset != 0);
@@ -304,7 +302,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
 	uint32_t old_mem_type = bo->mem.mem_type;
 	int ret;
 
-	BUG_ON(!atomic_read(&bo->reserved));
+	BUG_ON(!ttm_bo_is_reserved(bo));
 	BUG_ON(old_mem_type != TTM_PL_VRAM &&
 	       old_mem_type != VMW_PL_GMR);
 
@@ -316,7 +314,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
 	placement.num_placement = 1;
 	placement.placement = &pl_flags;
 
-	ret = ttm_bo_validate(bo, &placement, false, true, true);
+	ret = ttm_bo_validate(bo, &placement, false, true);
 
 	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2dd185e42f21..161f8b2549aa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -292,7 +292,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 			     PAGE_SIZE,
 			     ttm_bo_type_device,
 			     &vmw_vram_sys_placement,
-			     0, 0, false, NULL,
+			     0, false, NULL,
 			     &dev_priv->dummy_query_bo);
 }
 
@@ -432,6 +432,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	struct vmw_private *dev_priv;
 	int ret;
 	uint32_t svga_id;
+	enum vmw_res_type i;
 
 	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
 	if (unlikely(dev_priv == NULL)) {
@@ -448,15 +449,18 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	mutex_init(&dev_priv->cmdbuf_mutex);
 	mutex_init(&dev_priv->release_mutex);
 	rwlock_init(&dev_priv->resource_lock);
-	idr_init(&dev_priv->context_idr);
-	idr_init(&dev_priv->surface_idr);
-	idr_init(&dev_priv->stream_idr);
+
+	for (i = vmw_res_context; i < vmw_res_max; ++i) {
+		idr_init(&dev_priv->res_idr[i]);
+		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
+	}
+
 	mutex_init(&dev_priv->init_mutex);
 	init_waitqueue_head(&dev_priv->fence_queue);
 	init_waitqueue_head(&dev_priv->fifo_queue);
 	dev_priv->fence_queue_waiters = 0;
 	atomic_set(&dev_priv->fifo_queue_waiters, 0);
-	INIT_LIST_HEAD(&dev_priv->surface_lru);
+
 	dev_priv->used_memory_size = 0;
 
 	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
@@ -609,14 +613,18 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		}
 	}
 
+	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
+		ret = drm_irq_install(dev);
+		if (ret != 0) {
+			DRM_ERROR("Failed installing irq: %d\n", ret);
+			goto out_no_irq;
+		}
+	}
+
 	dev_priv->fman = vmw_fence_manager_init(dev_priv);
 	if (unlikely(dev_priv->fman == NULL))
 		goto out_no_fman;
 
-	/* Need to start the fifo to check if we can do screen objects */
-	ret = vmw_3d_resource_inc(dev_priv, true);
-	if (unlikely(ret != 0))
-		goto out_no_fifo;
 	vmw_kms_save_vga(dev_priv);
 
 	/* Start kms and overlay systems, needs fifo. */
@@ -625,25 +633,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		goto out_no_kms;
 	vmw_overlay_init(dev_priv);
 
-	/* 3D Depends on Screen Objects being used. */
-	DRM_INFO("Detected %sdevice 3D availability.\n",
-		 vmw_fifo_have_3d(dev_priv) ?
-		 "" : "no ");
-
-	/* We might be done with the fifo now */
 	if (dev_priv->enable_fb) {
+		ret = vmw_3d_resource_inc(dev_priv, true);
+		if (unlikely(ret != 0))
+			goto out_no_fifo;
 		vmw_fb_init(dev_priv);
-	} else {
-		vmw_kms_restore_vga(dev_priv);
-		vmw_3d_resource_dec(dev_priv, true);
-	}
-
-	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
-		ret = drm_irq_install(dev);
-		if (unlikely(ret != 0)) {
-			DRM_ERROR("Failed installing irq: %d\n", ret);
-			goto out_no_irq;
-		}
 	}
 
 	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
@@ -651,20 +645,16 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	return 0;
 
-out_no_irq:
-	if (dev_priv->enable_fb)
-		vmw_fb_close(dev_priv);
+out_no_fifo:
 	vmw_overlay_close(dev_priv);
 	vmw_kms_close(dev_priv);
 out_no_kms:
-	/* We still have a 3D resource reference held */
-	if (dev_priv->enable_fb) {
-		vmw_kms_restore_vga(dev_priv);
-		vmw_3d_resource_dec(dev_priv, false);
-	}
-out_no_fifo:
+	vmw_kms_restore_vga(dev_priv);
 	vmw_fence_manager_takedown(dev_priv->fman);
 out_no_fman:
+	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+		drm_irq_uninstall(dev_priv->dev);
+out_no_irq:
 	if (dev_priv->stealth)
 		pci_release_region(dev->pdev, 2);
 	else
@@ -684,9 +674,9 @@ out_err2:
 out_err1:
 	vmw_ttm_global_release(dev_priv);
 out_err0:
-	idr_destroy(&dev_priv->surface_idr);
-	idr_destroy(&dev_priv->context_idr);
-	idr_destroy(&dev_priv->stream_idr);
+	for (i = vmw_res_context; i < vmw_res_max; ++i)
+		idr_destroy(&dev_priv->res_idr[i]);
+
 	kfree(dev_priv);
 	return ret;
 }
@@ -694,13 +684,14 @@ out_err0:
 static int vmw_driver_unload(struct drm_device *dev)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
+	enum vmw_res_type i;
 
 	unregister_pm_notifier(&dev_priv->pm_nb);
 
+	if (dev_priv->ctx.res_ht_initialized)
+		drm_ht_remove(&dev_priv->ctx.res_ht);
 	if (dev_priv->ctx.cmd_bounce)
 		vfree(dev_priv->ctx.cmd_bounce);
-	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-		drm_irq_uninstall(dev_priv->dev);
 	if (dev_priv->enable_fb) {
 		vmw_fb_close(dev_priv);
 		vmw_kms_restore_vga(dev_priv);
@@ -709,6 +700,8 @@ static int vmw_driver_unload(struct drm_device *dev)
 	vmw_kms_close(dev_priv);
 	vmw_overlay_close(dev_priv);
 	vmw_fence_manager_takedown(dev_priv->fman);
+	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+		drm_irq_uninstall(dev_priv->dev);
 	if (dev_priv->stealth)
 		pci_release_region(dev->pdev, 2);
 	else
@@ -723,9 +716,9 @@ static int vmw_driver_unload(struct drm_device *dev)
 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 	(void)ttm_bo_device_release(&dev_priv->bdev);
 	vmw_ttm_global_release(dev_priv);
-	idr_destroy(&dev_priv->surface_idr);
-	idr_destroy(&dev_priv->context_idr);
-	idr_destroy(&dev_priv->stream_idr);
+
+	for (i = vmw_res_context; i < vmw_res_max; ++i)
+		idr_destroy(&dev_priv->res_idr[i]);
 
 	kfree(dev_priv);
 
@@ -924,11 +917,11 @@ static int vmw_master_set(struct drm_device *dev,
 
 out_no_active_lock:
 	if (!dev_priv->enable_fb) {
+		vmw_kms_restore_vga(dev_priv);
+		vmw_3d_resource_dec(dev_priv, true);
 		mutex_lock(&dev_priv->hw_mutex);
 		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
 		mutex_unlock(&dev_priv->hw_mutex);
-		vmw_kms_restore_vga(dev_priv);
-		vmw_3d_resource_dec(dev_priv, true);
 	}
 	return ret;
 }
@@ -949,7 +942,7 @@ static void vmw_master_drop(struct drm_device *dev,
 
 	vmw_fp->locked_master = drm_master_get(file_priv->master);
 	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
-	vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+	vmw_execbuf_release_pinned_bo(dev_priv);
 
 	if (unlikely((ret != 0))) {
 		DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -962,11 +955,11 @@ static void vmw_master_drop(struct drm_device *dev,
 		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
 		if (unlikely(ret != 0))
 			DRM_ERROR("Unable to clean VRAM on master drop.\n");
+		vmw_kms_restore_vga(dev_priv);
+		vmw_3d_resource_dec(dev_priv, true);
 		mutex_lock(&dev_priv->hw_mutex);
 		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
 		mutex_unlock(&dev_priv->hw_mutex);
-		vmw_kms_restore_vga(dev_priv);
-		vmw_3d_resource_dec(dev_priv, true);
 	}
 
 	dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1001,7 +994,8 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 		 * This empties VRAM and unbinds all GMR bindings.
 		 * Buffer contents is moved to swappable memory.
 		 */
-		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+		vmw_execbuf_release_pinned_bo(dev_priv);
+		vmw_resource_evict_all(dev_priv);
 		ttm_bo_swapout_all(&dev_priv->bdev);
 
 		break;
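
For reference, a minimal self-contained sketch of the per-type resource bookkeeping the vmw_driver_load()/vmw_driver_unload() hunks above switch to: one enum-indexed array of idrs and LRU lists replaces the three separate idr fields, so setup and teardown collapse into a loop. Names mirror the patch, but the sketch is illustrative only, not driver code:

#include <linux/idr.h>
#include <linux/list.h>

enum demo_res_type { demo_res_context, demo_res_surface, demo_res_stream, demo_res_max };

struct demo_res_tables {
	struct idr res_idr[demo_res_max];	/* id -> resource, one map per type */
	struct list_head res_lru[demo_res_max];	/* eviction order, one list per type */
};

static void demo_res_tables_init(struct demo_res_tables *t)
{
	enum demo_res_type i;

	for (i = demo_res_context; i < demo_res_max; ++i) {
		idr_init(&t->res_idr[i]);
		INIT_LIST_HEAD(&t->res_lru[i]);
	}
}

static void demo_res_tables_fini(struct demo_res_tables *t)
{
	enum demo_res_type i;

	for (i = demo_res_context; i < demo_res_max; ++i)
		idr_destroy(&t->res_idr[i]);
}
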
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 88a179e26de9..13aeda71280e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -67,31 +67,46 @@ struct vmw_fpriv {
 
 struct vmw_dma_buffer {
 	struct ttm_buffer_object base;
-	struct list_head validate_list;
-	bool gmr_bound;
-	uint32_t cur_validate_node;
-	bool on_validate_list;
+	struct list_head res_list;
 };
 
+/**
+ * struct vmw_validate_buffer - Carries validation info about buffers.
+ *
+ * @base: Validation info for TTM.
+ * @hash: Hash entry for quick lookup of the TTM buffer object.
+ *
+ * This structure also carries driver-private validation info
+ * on top of the info needed by TTM.
+ */
+struct vmw_validate_buffer {
+	struct ttm_validate_buffer base;
+	struct drm_hash_item hash;
+};
+
+struct vmw_res_func;
 struct vmw_resource {
 	struct kref kref;
 	struct vmw_private *dev_priv;
-	struct idr *idr;
 	int id;
-	enum ttm_object_type res_type;
 	bool avail;
-	void (*remove_from_lists) (struct vmw_resource *res);
-	void (*hw_destroy) (struct vmw_resource *res);
+	unsigned long backup_size;
+	bool res_dirty; /* Protected by backup buffer reserved */
+	bool backup_dirty; /* Protected by backup buffer reserved */
+	struct vmw_dma_buffer *backup;
+	unsigned long backup_offset;
+	const struct vmw_res_func *func;
+	struct list_head lru_head; /* Protected by the resource lock */
+	struct list_head mob_head; /* Protected by @backup reserved */
 	void (*res_free) (struct vmw_resource *res);
-	struct list_head validate_head;
-	struct list_head query_head; /* Protected by the cmdbuf mutex */
-	/* TODO is a generic snooper needed? */
-#if 0
-	void (*snoop)(struct vmw_resource *res,
-		      struct ttm_object_file *tfile,
-		      SVGA3dCmdHeader *header);
-	void *snoop_priv;
-#endif
+	void (*hw_destroy) (struct vmw_resource *res);
+};
+
+enum vmw_res_type {
+	vmw_res_context,
+	vmw_res_surface,
+	vmw_res_stream,
+	vmw_res_max
 };
 
 struct vmw_cursor_snooper {
@@ -105,20 +120,18 @@ struct vmw_surface_offset;
 
 struct vmw_surface {
 	struct vmw_resource res;
-	struct list_head lru_head; /* Protected by the resource lock */
 	uint32_t flags;
 	uint32_t format;
 	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+	struct drm_vmw_size base_size;
 	struct drm_vmw_size *sizes;
 	uint32_t num_sizes;
-
 	bool scanout;
-
 	/* TODO so far just an extra pointer */
 	struct vmw_cursor_snooper snooper;
-	struct ttm_buffer_object *backup;
 	struct vmw_surface_offset *offsets;
-	uint32_t backup_size;
+	SVGA3dTextureFilter autogen_filter;
+	uint32_t multisample_count;
 };
 
 struct vmw_marker_queue {
@@ -145,29 +158,46 @@ struct vmw_relocation {
 	uint32_t index;
 };
 
+/**
+ * struct vmw_res_cache_entry - resource information cache entry
+ *
+ * @valid: Whether the entry is valid, which also implies that the execbuf
+ * code holds a reference to the resource, and that it's placed on the
+ * validation list.
+ * @handle: User-space handle of a resource.
+ * @res: Non-ref-counted pointer to the resource.
+ *
+ * Used to avoid frequent repeated user-space handle lookups of the
+ * same resource.
+ */
+struct vmw_res_cache_entry {
+	bool valid;
+	uint32_t handle;
+	struct vmw_resource *res;
+	struct vmw_resource_val_node *node;
+};
+
 struct vmw_sw_context{
-	struct ida bo_list;
-	uint32_t last_cid;
-	bool cid_valid;
+	struct drm_open_hash res_ht;
+	bool res_ht_initialized;
 	bool kernel; /**< is the call made from the kernel */
-	struct vmw_resource *cur_ctx;
-	uint32_t last_sid;
-	uint32_t sid_translation;
-	bool sid_valid;
 	struct ttm_object_file *tfile;
 	struct list_head validate_nodes;
 	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
 	uint32_t cur_reloc;
-	struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
+	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
 	uint32_t cur_val_buf;
 	uint32_t *cmd_bounce;
 	uint32_t cmd_bounce_size;
 	struct list_head resource_list;
 	uint32_t fence_flags;
-	struct list_head query_list;
 	struct ttm_buffer_object *cur_query_bo;
-	uint32_t cur_query_cid;
-	bool query_cid_valid;
+	struct list_head res_relocations;
+	uint32_t *buf_start;
+	struct vmw_res_cache_entry res_cache[vmw_res_max];
+	struct vmw_resource *last_query_ctx;
+	bool needs_post_query_barrier;
+	struct vmw_resource *error_resource;
 };
 
 struct vmw_legacy_display;
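
The res_cache[] member added to the software context above deserves a quick illustration. A minimal sketch (hypothetical names) of the one-entry-per-type cache that is consulted before any user-space handle lookup:

#include <linux/types.h>

struct demo_cache_entry {
	bool valid;
	u32 handle;		/* user-space handle seen last time */
	void *res;		/* non-ref-counted resource pointer */
};

/* Returns the cached resource on a hit, NULL on a miss; on a miss the
 * caller performs the full handle lookup and refills the entry. */
static void *demo_cache_lookup(struct demo_cache_entry *e, u32 handle)
{
	if (e->valid && e->handle == handle)
		return e->res;
	return NULL;
}
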
@@ -242,10 +272,7 @@ struct vmw_private {
 	 */
 
 	rwlock_t resource_lock;
-	struct idr context_idr;
-	struct idr surface_idr;
-	struct idr stream_idr;
-
+	struct idr res_idr[vmw_res_max];
 	/*
 	 * Block lastclose from racing with firstopen.
 	 */
@@ -320,6 +347,7 @@ struct vmw_private {
 	struct ttm_buffer_object *dummy_query_bo;
 	struct ttm_buffer_object *pinned_bo;
 	uint32_t query_cid;
+	uint32_t query_cid_valid;
 	bool dummy_query_bo_pinned;
 
 	/*
@@ -329,10 +357,15 @@ struct vmw_private {
 	 * protected by the cmdbuf mutex for simplicity.
 	 */
 
-	struct list_head surface_lru;
+	struct list_head res_lru[vmw_res_max];
 	uint32_t used_memory_size;
 };
 
+static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_surface, res);
+}
+
 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
 {
 	return (struct vmw_private *)dev->dev_private;
@@ -381,10 +414,16 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
 /**
  * Resource utilities - vmwgfx_resource.c
  */
+struct vmw_user_resource_conv;
+extern const struct vmw_user_resource_conv *user_surface_converter;
+extern const struct vmw_user_resource_conv *user_context_converter;
 
 extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
 extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern int vmw_resource_validate(struct vmw_resource *res);
+extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
+extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
 extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
 extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
@@ -398,14 +437,13 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 				  uint32_t handle,
 				  struct vmw_surface **out_surf,
 				  struct vmw_dma_buffer **out_buf);
+extern int vmw_user_resource_lookup_handle(
+	struct vmw_private *dev_priv,
+	struct ttm_object_file *tfile,
+	uint32_t handle,
+	const struct vmw_user_resource_conv *converter,
+	struct vmw_resource **p_res);
 extern void vmw_surface_res_free(struct vmw_resource *res);
-extern int vmw_surface_init(struct vmw_private *dev_priv,
-			    struct vmw_surface *srf,
-			    void (*res_free) (struct vmw_resource *res));
-extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
-					  struct ttm_object_file *tfile,
-					  uint32_t handle,
-					  struct vmw_surface **out);
 extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
 extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -423,6 +461,8 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
 			   size_t size, struct ttm_placement *placement,
 			   bool interuptable,
 			   void (*bo_free) (struct ttm_buffer_object *bo));
+extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+				  struct ttm_object_file *tfile);
 extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
@@ -440,7 +480,14 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
 				  struct ttm_object_file *tfile,
 				  uint32_t *inout_id,
 				  struct vmw_resource **out);
-extern void vmw_resource_unreserve(struct list_head *list);
+extern void vmw_resource_unreserve(struct vmw_resource *res,
+				   struct vmw_dma_buffer *new_backup,
+				   unsigned long new_backup_offset);
+extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+				     struct ttm_mem_reg *mem);
+extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+				struct vmw_fence_obj *fence);
+extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
 
 /**
  * DMA buffer helper routines - vmwgfx_dmabuf.c
@@ -538,10 +585,9 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
 			       struct drm_vmw_fence_rep __user
 			       *user_fence_rep,
 			       struct vmw_fence_obj **out_fence);
-
-extern void
-vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
-			      bool only_on_cid_match, uint32_t cid);
+extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+					    struct vmw_fence_obj *fence);
+extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
 
 extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
 				      struct vmw_private *dev_priv,
@@ -699,10 +745,13 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
 static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
 {
 	struct vmw_dma_buffer *tmp_buf = *buf;
-	struct ttm_buffer_object *bo = &tmp_buf->base;
-	*buf = NULL;
 
-	ttm_bo_unref(&bo);
+	*buf = NULL;
+	if (tmp_buf != NULL) {
+		struct ttm_buffer_object *bo = &tmp_buf->base;
+
+		ttm_bo_unref(&bo);
+	}
 }
 
 static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
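
The new vmw_res_to_srf() helper above is a standard embedded-base downcast. A self-contained sketch of the idiom with illustrative types (the real layout lives in vmwgfx_drv.h):

#include <linux/kernel.h>	/* container_of() */

struct demo_resource { int id; };

struct demo_surface {
	struct demo_resource res;	/* embedded base object */
	unsigned int format;
};

/* Recover the enclosing surface from a pointer to its embedded
 * resource; valid only for resources known to live in a surface. */
static inline struct demo_surface *demo_res_to_srf(struct demo_resource *res)
{
	return container_of(res, struct demo_surface, res);
}
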
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 30654b4cc972..394e6476105b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -30,6 +30,181 @@
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_placement.h>
 
+#define VMW_RES_HT_ORDER 12
+
+/**
+ * struct vmw_resource_relocation - Relocation info for resources
+ *
+ * @head: List head for the software context's relocation list.
+ * @res: Non-ref-counted pointer to the resource.
+ * @offset: Offset, in units of 4-byte entries, into the command buffer
+ * where the id that needs fixup is located.
+ */
+struct vmw_resource_relocation {
+	struct list_head head;
+	const struct vmw_resource *res;
+	unsigned long offset;
+};
+
+/**
+ * struct vmw_resource_val_node - Validation info for resources
+ *
+ * @head: List head for the software context's resource list.
+ * @hash: Hash entry for quick resource to val_node lookup.
+ * @res: Ref-counted pointer to the resource.
+ * @new_backup: Refcounted pointer to the new backup buffer.
+ * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
+ * @first_usage: Set to true the first time the resource is referenced in
+ * the command stream.
+ * @no_buffer_needed: The resource does not need a backup buffer allocated
+ * on reservation; the command stream will provide one.
+ */
+struct vmw_resource_val_node {
+	struct list_head head;
+	struct drm_hash_item hash;
+	struct vmw_resource *res;
+	struct vmw_dma_buffer *new_backup;
+	unsigned long new_backup_offset;
+	bool first_usage;
+	bool no_buffer_needed;
+};
+
+/**
+ * vmw_resource_list_unreserve - Unreserve resources previously reserved for
+ * command submission.
+ *
+ * @list: List of resources to unreserve.
+ * @backoff: Whether command submission failed.
+ */
+static void vmw_resource_list_unreserve(struct list_head *list,
+					bool backoff)
+{
+	struct vmw_resource_val_node *val;
+
+	list_for_each_entry(val, list, head) {
+		struct vmw_resource *res = val->res;
+		struct vmw_dma_buffer *new_backup =
+			backoff ? NULL : val->new_backup;
+
+		vmw_resource_unreserve(res, new_backup,
+			val->new_backup_offset);
+		vmw_dmabuf_unreference(&val->new_backup);
+	}
+}
+
+
+/**
+ * vmw_resource_val_add - Add a resource to the software context's
+ * resource list if it's not already on it.
+ *
+ * @sw_context: Pointer to the software context.
+ * @res: Pointer to the resource.
+ * @p_node: If non-NULL on entry, points to a valid pointer to a
+ * struct vmw_resource_val_node on successful return.
+ */
+static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
+				struct vmw_resource *res,
+				struct vmw_resource_val_node **p_node)
+{
+	struct vmw_resource_val_node *node;
+	struct drm_hash_item *hash;
+	int ret;
+
+	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
+				    &hash) == 0)) {
+		node = container_of(hash, struct vmw_resource_val_node, hash);
+		node->first_usage = false;
+		if (unlikely(p_node != NULL))
+			*p_node = node;
+		return 0;
+	}
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (unlikely(node == NULL)) {
+		DRM_ERROR("Failed to allocate a resource validation "
+			  "entry.\n");
+		return -ENOMEM;
+	}
+
+	node->hash.key = (unsigned long) res;
+	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to initialize a resource validation "
+			  "entry.\n");
+		kfree(node);
+		return ret;
+	}
+	list_add_tail(&node->head, &sw_context->resource_list);
+	node->res = vmw_resource_reference(res);
+	node->first_usage = true;
+
+	if (unlikely(p_node != NULL))
+		*p_node = node;
+
+	return 0;
+}
+
+/**
+ * vmw_resource_relocation_add - Add a relocation to the relocation list
+ *
+ * @list: Pointer to head of relocation list.
+ * @res: The resource.
+ * @offset: Offset into the command buffer currently being parsed where the
+ * id that needs fixup is located. Granularity is 4 bytes.
+ */
+static int vmw_resource_relocation_add(struct list_head *list,
+				       const struct vmw_resource *res,
+				       unsigned long offset)
+{
+	struct vmw_resource_relocation *rel;
+
+	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
+	if (unlikely(rel == NULL)) {
+		DRM_ERROR("Failed to allocate a resource relocation.\n");
+		return -ENOMEM;
+	}
+
+	rel->res = res;
+	rel->offset = offset;
+	list_add_tail(&rel->head, list);
+
+	return 0;
+}
+
+/**
+ * vmw_resource_relocations_free - Free all relocations on a list
+ *
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_free(struct list_head *list)
+{
+	struct vmw_resource_relocation *rel, *n;
+
+	list_for_each_entry_safe(rel, n, list, head) {
+		list_del(&rel->head);
+		kfree(rel);
+	}
+}
+
+/**
+ * vmw_resource_relocations_apply - Apply all relocations on a list
+ *
+ * @cb: Pointer to the start of the command buffer being patched. This need
+ * not be the same buffer as the one being parsed when the relocation
+ * list was built, but the contents must be the same modulo the
+ * resource ids.
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_apply(uint32_t *cb,
+					   struct list_head *list)
+{
+	struct vmw_resource_relocation *rel;
+
+	list_for_each_entry(rel, list, head)
+		cb[rel->offset] = rel->res->id;
+}
+
 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
 			   struct vmw_sw_context *sw_context,
 			   SVGA3dCmdHeader *header)
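
To make the two-pass id fixup added above concrete, here is a compact sketch (hypothetical names, kernel-style C) of what vmw_resource_relocation_add() and vmw_resource_relocations_apply() cooperate on: parsing records the word offset of every resource id in the command buffer, and once every resource has a device id the recorded slots are patched in one pass:

#include <linux/list.h>
#include <linux/types.h>

struct demo_reloc {
	struct list_head head;
	unsigned long offset;	/* in 32-bit words from buffer start */
	const int *id;		/* device id, final only after validation */
};

static void demo_relocs_apply(u32 *cb, struct list_head *list)
{
	struct demo_reloc *rel;

	list_for_each_entry(rel, list, head)
		cb[rel->offset] = *rel->id;	/* user handle -> device id */
}
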
@@ -44,25 +219,11 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
 	return 0;
 }
 
-static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
-					  struct vmw_resource **p_res)
-{
-	struct vmw_resource *res = *p_res;
-
-	if (list_empty(&res->validate_head)) {
-		list_add_tail(&res->validate_head, &sw_context->resource_list);
-		*p_res = NULL;
-	} else
-		vmw_resource_unreference(p_res);
-}
-
 /**
  * vmw_bo_to_validate_list - add a bo to a validate list
  *
  * @sw_context: The software context used for this command submission batch.
  * @bo: The buffer object to add.
- * @fence_flags: Fence flags to be or'ed with any other fence flags for
- * this buffer on this submission batch.
 * @p_val_node: If non-NULL, will be updated with the validate node number
  * on return.
  *
@@ -71,31 +232,43 @@ static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
  */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 				   struct ttm_buffer_object *bo,
-				   uint32_t fence_flags,
 				   uint32_t *p_val_node)
 {
 	uint32_t val_node;
+	struct vmw_validate_buffer *vval_buf;
 	struct ttm_validate_buffer *val_buf;
+	struct drm_hash_item *hash;
+	int ret;
 
-	val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
-
-	if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
-		DRM_ERROR("Max number of DMA buffers per submission"
-			  " exceeded.\n");
-		return -EINVAL;
-	}
-
-	val_buf = &sw_context->val_bufs[val_node];
-	if (unlikely(val_node == sw_context->cur_val_buf)) {
-		val_buf->new_sync_obj_arg = NULL;
-		val_buf->bo = ttm_bo_reference(bo);
-		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
+	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+				    &hash) == 0)) {
+		vval_buf = container_of(hash, struct vmw_validate_buffer,
+					hash);
+		val_buf = &vval_buf->base;
+		val_node = vval_buf - sw_context->val_bufs;
+	} else {
+		val_node = sw_context->cur_val_buf;
+		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
+			DRM_ERROR("Max number of DMA buffers per submission "
+				  "exceeded.\n");
+			return -EINVAL;
+		}
+		vval_buf = &sw_context->val_bufs[val_node];
+		vval_buf->hash.key = (unsigned long) bo;
+		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed to initialize a buffer validation "
+				  "entry.\n");
+			return ret;
+		}
 		++sw_context->cur_val_buf;
+		val_buf = &vval_buf->base;
+		val_buf->bo = ttm_bo_reference(bo);
+		val_buf->reserved = false;
+		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 	}
 
-	val_buf->new_sync_obj_arg = (void *)
-		((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
-	sw_context->fence_flags |= fence_flags;
+	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
 
 	if (p_val_node)
 		*p_val_node = val_node;
@@ -103,86 +276,175 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 	return 0;
 }
 
+/**
+ * vmw_resources_reserve - Reserve all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Note that since VMware's command submission is currently protected by
+ * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
+ * since only a single thread at a time will attempt this.
+ */
+static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
+{
+	struct vmw_resource_val_node *val;
+	int ret;
+
+	list_for_each_entry(val, &sw_context->resource_list, head) {
+		struct vmw_resource *res = val->res;
+
+		ret = vmw_resource_reserve(res, val->no_buffer_needed);
+		if (unlikely(ret != 0))
+			return ret;
+
+		if (res->backup) {
+			struct ttm_buffer_object *bo = &res->backup->base;
+
+			ret = vmw_bo_to_validate_list
+				(sw_context, bo, NULL);
+
+			if (unlikely(ret != 0))
+				return ret;
+		}
+	}
+	return 0;
+}
+
+/**
+ * vmw_resources_validate - Validate all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Before this function is called, all resource backup buffers must have
+ * been validated.
+ */
+static int vmw_resources_validate(struct vmw_sw_context *sw_context)
+{
+	struct vmw_resource_val_node *val;
+	int ret;
+
+	list_for_each_entry(val, &sw_context->resource_list, head) {
+		struct vmw_resource *res = val->res;
+
+		ret = vmw_resource_validate(res);
+		if (unlikely(ret != 0)) {
+			if (ret != -ERESTARTSYS)
+				DRM_ERROR("Failed to validate resource.\n");
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visible, type-specific information.
+ * @id: Pointer to the location in the command buffer currently being
+ * parsed where the user-space resource id handle is located.
+ * @p_val: If non-NULL, points to the resource validation node on
+ * successful return.
+ */
+static int vmw_cmd_res_check(struct vmw_private *dev_priv,
+			     struct vmw_sw_context *sw_context,
+			     enum vmw_res_type res_type,
+			     const struct vmw_user_resource_conv *converter,
+			     uint32_t *id,
+			     struct vmw_resource_val_node **p_val)
+{
+	struct vmw_res_cache_entry *rcache =
+		&sw_context->res_cache[res_type];
+	struct vmw_resource *res;
+	struct vmw_resource_val_node *node;
+	int ret;
+
+	if (*id == SVGA3D_INVALID_ID)
+		return 0;
+
+	/*
+	 * Fastpath in case of repeated commands referencing the same
+	 * resource
+	 */
+
+	if (likely(rcache->valid && *id == rcache->handle)) {
+		const struct vmw_resource *res = rcache->res;
+
+		rcache->node->first_usage = false;
+		if (p_val)
+			*p_val = rcache->node;
+
+		return vmw_resource_relocation_add
+			(&sw_context->res_relocations, res,
+			 id - sw_context->buf_start);
+	}
+
+	ret = vmw_user_resource_lookup_handle(dev_priv,
+					      sw_context->tfile,
+					      *id,
+					      converter,
+					      &res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not find or use resource 0x%08x.\n",
+			  (unsigned) *id);
+		dump_stack();
+		return ret;
+	}
+
+	rcache->valid = true;
+	rcache->res = res;
+	rcache->handle = *id;
+
+	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
+					  res,
+					  id - sw_context->buf_start);
+	if (unlikely(ret != 0))
+		goto out_no_reloc;
+
+	ret = vmw_resource_val_add(sw_context, res, &node);
+	if (unlikely(ret != 0))
+		goto out_no_reloc;
+
+	rcache->node = node;
+	if (p_val)
+		*p_val = node;
+	vmw_resource_unreference(&res);
+	return 0;
+
+out_no_reloc:
+	BUG_ON(sw_context->error_resource != NULL);
+	sw_context->error_resource = res;
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_cid_check - Check a command header for valid context information.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @header: A command header with an embedded user-space context handle.
+ *
+ * Convenience function: Call vmw_cmd_res_check with the user-space context
+ * handle embedded in @header.
+ */
 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 			     struct vmw_sw_context *sw_context,
 			     SVGA3dCmdHeader *header)
 {
-	struct vmw_resource *ctx;
-
 	struct vmw_cid_cmd {
 		SVGA3dCmdHeader header;
 		__le32 cid;
 	} *cmd;
-	int ret;
 
 	cmd = container_of(header, struct vmw_cid_cmd, header);
-	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
-		return 0;
-
-	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
-				&ctx);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Could not find or use context %u\n",
-			  (unsigned) cmd->cid);
-		return ret;
-	}
-
-	sw_context->last_cid = cmd->cid;
-	sw_context->cid_valid = true;
-	sw_context->cur_ctx = ctx;
-	vmw_resource_to_validate_list(sw_context, &ctx);
-
-	return 0;
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				 user_context_converter, &cmd->cid, NULL);
 }
 
-static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
-			     struct vmw_sw_context *sw_context,
-			     uint32_t *sid)
-{
-	struct vmw_surface *srf;
-	int ret;
-	struct vmw_resource *res;
-
-	if (*sid == SVGA3D_INVALID_ID)
-		return 0;
-
-	if (likely((sw_context->sid_valid  &&
-		      *sid == sw_context->last_sid))) {
-		*sid = sw_context->sid_translation;
-		return 0;
-	}
-
-	ret = vmw_user_surface_lookup_handle(dev_priv,
-					     sw_context->tfile,
-					     *sid, &srf);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Could ot find or use surface 0x%08x "
-			  "address 0x%08lx\n",
-			  (unsigned int) *sid,
-			  (unsigned long) sid);
-		return ret;
-	}
-
-	ret = vmw_surface_validate(dev_priv, srf);
-	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Could not validate surface.\n");
-		vmw_surface_unreference(&srf);
-		return ret;
-	}
-
-	sw_context->last_sid = *sid;
-	sw_context->sid_valid = true;
-	sw_context->sid_translation = srf->res.id;
-	*sid = sw_context->sid_translation;
-
-	res = &srf->res;
-	vmw_resource_to_validate_list(sw_context, &res);
-
-	return 0;
-}
-
-
 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 					   struct vmw_sw_context *sw_context,
 					   SVGA3dCmdHeader *header)
@@ -198,7 +460,9 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 		return ret;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
-	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.target.sid, NULL);
 	return ret;
 }
 
@@ -213,10 +477,14 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
 	int ret;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
-	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.src.sid, NULL);
 	if (unlikely(ret != 0))
 		return ret;
-	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.dest.sid, NULL);
 }
 
 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
@@ -230,10 +498,14 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
 	int ret;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
-	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.src.sid, NULL);
 	if (unlikely(ret != 0))
 		return ret;
-	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.dest.sid, NULL);
 }
 
 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
@@ -252,7 +524,9 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
 		return -EPERM;
 	}
 
-	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.srcImage.sid, NULL);
 }
 
 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
@@ -272,14 +546,15 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
 		return -EPERM;
 	}
 
-	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter, &cmd->body.sid,
+				 NULL);
 }
 
 /**
  * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
  *
  * @dev_priv: The device private structure.
- * @cid: The hardware context for the next query.
  * @new_query_bo: The new buffer holding query results.
  * @sw_context: The software context used for this command submission.
  *
@@ -287,18 +562,18 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
  * query results, and if another buffer currently is pinned for query
  * results. If so, the function prepares the state of @sw_context for
  * switching pinned buffers after successful submission of the current
- * command batch. It also checks whether we're using a new query context.
- * In that case, it makes sure we emit a query barrier for the old
- * context before the current query buffer is fenced.
+ * command batch.
  */
 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
-				       uint32_t cid,
 				       struct ttm_buffer_object *new_query_bo,
 				       struct vmw_sw_context *sw_context)
 {
+	struct vmw_res_cache_entry *ctx_entry =
+		&sw_context->res_cache[vmw_res_context];
 	int ret;
-	bool add_cid = false;
-	uint32_t cid_to_add;
+
+	BUG_ON(!ctx_entry->valid);
+	sw_context->last_query_ctx = ctx_entry->res;
 
 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 
@@ -308,12 +583,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 		}
 
 		if (unlikely(sw_context->cur_query_bo != NULL)) {
-			BUG_ON(!sw_context->query_cid_valid);
-			add_cid = true;
-			cid_to_add = sw_context->cur_query_cid;
+			sw_context->needs_post_query_barrier = true;
 			ret = vmw_bo_to_validate_list(sw_context,
 						      sw_context->cur_query_bo,
-						      DRM_VMW_FENCE_FLAG_EXEC,
 						      NULL);
 			if (unlikely(ret != 0))
 				return ret;
@@ -322,35 +594,12 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 
 		ret = vmw_bo_to_validate_list(sw_context,
 					      dev_priv->dummy_query_bo,
-					      DRM_VMW_FENCE_FLAG_EXEC,
 					      NULL);
 		if (unlikely(ret != 0))
 			return ret;
 
 	}
 
-	if (unlikely(cid != sw_context->cur_query_cid &&
-		     sw_context->query_cid_valid)) {
-		add_cid = true;
-		cid_to_add = sw_context->cur_query_cid;
-	}
-
-	sw_context->cur_query_cid = cid;
-	sw_context->query_cid_valid = true;
-
-	if (add_cid) {
-		struct vmw_resource *ctx = sw_context->cur_ctx;
-
-		if (list_empty(&ctx->query_head))
-			list_add_tail(&ctx->query_head,
-				      &sw_context->query_list);
-		ret = vmw_bo_to_validate_list(sw_context,
-					      dev_priv->dummy_query_bo,
-					      DRM_VMW_FENCE_FLAG_EXEC,
-					      NULL);
-		if (unlikely(ret != 0))
-			return ret;
-	}
 	return 0;
 }
 
@@ -362,10 +611,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
  * @sw_context: The software context used for this command submission batch.
  *
 * This function will check if we're switching query buffers, and will then
- * if no other query waits are issued this command submission batch,
  * issue a dummy occlusion query wait used as a query barrier. When the fence
  * object following that query wait has signaled, we are sure that all
- * preseding queries have finished, and the old query buffer can be unpinned.
+ * preceding queries have finished, and the old query buffer can be unpinned.
  * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
  * old query buffer won't be moved until the fence has signaled.
@@ -376,20 +624,19 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 				     struct vmw_sw_context *sw_context)
 {
-
-	struct vmw_resource *ctx, *next_ctx;
-	int ret;
-
 	/*
 	 * The validate list should still hold references to all
 	 * contexts here.
 	 */
 
-	list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
-				 query_head) {
-		list_del_init(&ctx->query_head);
+	if (sw_context->needs_post_query_barrier) {
+		struct vmw_res_cache_entry *ctx_entry =
+			&sw_context->res_cache[vmw_res_context];
+		struct vmw_resource *ctx;
+		int ret;
 
-		BUG_ON(list_empty(&ctx->validate_head));
+		BUG_ON(!ctx_entry->valid);
+		ctx = ctx_entry->res;
 
 		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
 
@@ -403,40 +650,46 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 			ttm_bo_unref(&dev_priv->pinned_bo);
 		}
 
-		vmw_bo_pin(sw_context->cur_query_bo, true);
+		if (!sw_context->needs_post_query_barrier) {
+			vmw_bo_pin(sw_context->cur_query_bo, true);
 
-		/*
-		 * We pin also the dummy_query_bo buffer so that we
-		 * don't need to validate it when emitting
-		 * dummy queries in context destroy paths.
-		 */
+			/*
+			 * We pin also the dummy_query_bo buffer so that we
+			 * don't need to validate it when emitting
+			 * dummy queries in context destroy paths.
+			 */
 
-		vmw_bo_pin(dev_priv->dummy_query_bo, true);
-		dev_priv->dummy_query_bo_pinned = true;
+			vmw_bo_pin(dev_priv->dummy_query_bo, true);
+			dev_priv->dummy_query_bo_pinned = true;
 
-		dev_priv->query_cid = sw_context->cur_query_cid;
-		dev_priv->pinned_bo =
-			ttm_bo_reference(sw_context->cur_query_bo);
+			BUG_ON(sw_context->last_query_ctx == NULL);
+			dev_priv->query_cid = sw_context->last_query_ctx->id;
+			dev_priv->query_cid_valid = true;
+			dev_priv->pinned_bo =
+				ttm_bo_reference(sw_context->cur_query_bo);
+		}
 	}
 }
 
 /**
- * vmw_query_switch_backoff - clear query barrier list
- * @sw_context: The sw context used for this submission batch.
+ * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr
  *
- * This function is used as part of an error path, where a previously
- * set up list of query barriers needs to be cleared.
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @ptr: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return, will carry
+ * a reference-counted pointer to the DMA buffer identified by the
+ * user-space handle in @ptr.
  *
+ * This function saves information needed to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr. The translation does not take place
+ * immediately, but during a call to vmw_apply_relocations().
+ * This function builds a relocation list and a list of buffers to validate.
+ * The former needs to be freed using either vmw_apply_relocations() or
+ * vmw_free_relocations(). The latter needs to be freed using
+ * vmw_clear_validations().
  */
-static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
-{
-	struct list_head *list, *next;
-
-	list_for_each_safe(list, next, &sw_context->query_list) {
-		list_del_init(list);
-	}
-}
-
 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 				   struct vmw_sw_context *sw_context,
 				   SVGAGuestPtr *ptr,
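
As a companion sketch to the comment above (hypothetical types; the real SVGAGuestPtr layout comes from the SVGA headers): applying a guest-pointer relocation rewrites the recorded location once the buffer's final placement is known:

#include <linux/types.h>

struct demo_guest_ptr {
	u32 gmr_id;
	u32 offset;
};

enum demo_placement { DEMO_PL_VRAM, DEMO_PL_GMR };

static void demo_apply_guest_ptr(struct demo_guest_ptr *loc,
				 enum demo_placement pl,
				 u32 framebuffer_gmr_id,
				 unsigned long bo_offset, u32 gmr_id)
{
	switch (pl) {
	case DEMO_PL_VRAM:
		loc->offset += bo_offset;	/* absolute offset in VRAM */
		loc->gmr_id = framebuffer_gmr_id;
		break;
	case DEMO_PL_GMR:
		loc->gmr_id = gmr_id;		/* id assigned by placement */
		break;
	}
}
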
@@ -465,8 +718,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
 	reloc->location = ptr;
 
-	ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC,
-				      &reloc->index);
+	ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
@@ -479,6 +731,37 @@ out_no_reloc:
 	return ret;
 }
 
+/**
+ * vmw_cmd_begin_query - Validate an SVGA_3D_CMD_BEGIN_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
+			       struct vmw_sw_context *sw_context,
+			       SVGA3dCmdHeader *header)
+{
+	struct vmw_begin_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBeginQuery q;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_begin_query_cmd,
+			   header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				 user_context_converter, &cmd->q.cid,
+				 NULL);
+}
+
+/**
+ * vmw_cmd_end_query - Validate an SVGA_3D_CMD_END_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 			     struct vmw_sw_context *sw_context,
 			     SVGA3dCmdHeader *header)
@@ -501,13 +784,19 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
-					  &vmw_bo->base, sw_context);
+	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
 
 	vmw_dmabuf_unreference(&vmw_bo);
 	return ret;
 }
 
+/**
+ * vmw_cmd_wait_query - Validate an SVGA_3D_CMD_WAIT_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 			      struct vmw_sw_context *sw_context,
 			      SVGA3dCmdHeader *header)
@@ -518,7 +807,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 		SVGA3dCmdWaitForQuery q;
 	} *cmd;
 	int ret;
-	struct vmw_resource *ctx;
 
 	cmd = container_of(header, struct vmw_query_cmd, header);
 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
@@ -532,16 +820,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 		return ret;
 
 	vmw_dmabuf_unreference(&vmw_bo);
-
-	/*
-	 * This wait will act as a barrier for previous waits for this
-	 * context.
-	 */
-
-	ctx = sw_context->cur_ctx;
-	if (!list_empty(&ctx->query_head))
-		list_del_init(&ctx->query_head);
-
 	return 0;
 }
 
@@ -550,14 +828,12 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 		       SVGA3dCmdHeader *header)
 {
 	struct vmw_dma_buffer *vmw_bo = NULL;
-	struct ttm_buffer_object *bo;
 	struct vmw_surface *srf = NULL;
 	struct vmw_dma_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSurfaceDMA dma;
 	} *cmd;
 	int ret;
-	struct vmw_resource *res;
 
 	cmd = container_of(header, struct vmw_dma_cmd, header);
 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
@@ -566,37 +842,20 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	bo = &vmw_bo->base;
-	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
-					     cmd->dma.host.sid, &srf);
-	if (ret) {
-		DRM_ERROR("could not find surface\n");
-		goto out_no_reloc;
-	}
-
-	ret = vmw_surface_validate(dev_priv, srf);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter, &cmd->dma.host.sid,
+				NULL);
 	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Culd not validate surface.\n");
-		goto out_no_validate;
+		if (unlikely(ret != -ERESTARTSYS))
+			DRM_ERROR("could not find surface for DMA.\n");
+		goto out_no_surface;
 	}
 
-	/*
-	 * Patch command stream with device SID.
-	 */
-	cmd->dma.host.sid = srf->res.id;
-	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
+	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
 
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
 
-	res = &srf->res;
-	vmw_resource_to_validate_list(sw_context, &res);
-
-	return 0;
-
-out_no_validate:
-	vmw_surface_unreference(&srf);
-out_no_reloc:
+out_no_surface:
 	vmw_dmabuf_unreference(&vmw_bo);
 	return ret;
 }
@@ -629,8 +888,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
 	}
 
 	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
-		ret = vmw_cmd_sid_check(dev_priv, sw_context,
-					&decl->array.surfaceId);
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					user_surface_converter,
+					&decl->array.surfaceId, NULL);
 		if (unlikely(ret != 0))
 			return ret;
 	}
@@ -644,8 +904,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
 
 	range = (SVGA3dPrimitiveRange *) decl;
 	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
-		ret = vmw_cmd_sid_check(dev_priv, sw_context,
-					&range->indexArray.surfaceId);
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					user_surface_converter,
+					&range->indexArray.surfaceId, NULL);
 		if (unlikely(ret != 0))
 			return ret;
 	}
@@ -676,8 +937,9 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
 			continue;
 
-		ret = vmw_cmd_sid_check(dev_priv, sw_context,
-					&cur_state->value);
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					user_surface_converter,
+					&cur_state->value, NULL);
 		if (unlikely(ret != 0))
 			return ret;
 	}
@@ -708,6 +970,34 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
 	return ret;
 }
 
+/**
+ * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
+			      struct vmw_sw_context *sw_context,
+			      SVGA3dCmdHeader *header)
+{
+	struct vmw_set_shader_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetShader body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_set_shader_cmd,
+			   header);
+
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return 0;
+}
+
 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context,
 				void *buf, uint32_t *size)
@@ -781,16 +1071,20 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
 	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
-		    &vmw_cmd_blt_surf_screen_check)
+		    &vmw_cmd_blt_surf_screen_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
+	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
+	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
 };
 
 static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -837,6 +1131,8 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
 	int32_t cur_size = size;
 	int ret;
 
+	sw_context->buf_start = buf;
+
 	while (cur_size > 0) {
 		size = cur_size;
 		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
@@ -868,43 +1164,63 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
 
 	for (i = 0; i < sw_context->cur_reloc; ++i) {
 		reloc = &sw_context->relocs[i];
-		validate = &sw_context->val_bufs[reloc->index];
+		validate = &sw_context->val_bufs[reloc->index].base;
 		bo = validate->bo;
-		if (bo->mem.mem_type == TTM_PL_VRAM) {
+		switch (bo->mem.mem_type) {
+		case TTM_PL_VRAM:
 			reloc->location->offset += bo->offset;
 			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
-		} else
+			break;
+		case VMW_PL_GMR:
 			reloc->location->gmrId = bo->mem.start;
+			break;
+		default:
+			BUG();
+		}
 	}
 	vmw_free_relocations(sw_context);
 }
 
+/**
+ * vmw_resource_list_unreference - Free up a resource list and unreference
+ * all resources referenced by it.
+ *
+ * @list: The resource list.
+ */
+static void vmw_resource_list_unreference(struct list_head *list)
+{
+	struct vmw_resource_val_node *val, *val_next;
+
+	/*
+	 * Drop references to resources held during command submission.
+	 */
+
+	list_for_each_entry_safe(val, val_next, list, head) {
+		list_del_init(&val->head);
+		vmw_resource_unreference(&val->res);
+		kfree(val);
+	}
+}
+
 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
 {
-	struct ttm_validate_buffer *entry, *next;
-	struct vmw_resource *res, *res_next;
+	struct vmw_validate_buffer *entry, *next;
+	struct vmw_resource_val_node *val;
 
 	/*
 	 * Drop references to DMA buffers held during command submission.
 	 */
 	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
-				 head) {
-		list_del(&entry->head);
-		vmw_dmabuf_validate_clear(entry->bo);
-		ttm_bo_unref(&entry->bo);
+				 base.head) {
+		list_del(&entry->base.head);
+		ttm_bo_unref(&entry->base.bo);
+		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
 		sw_context->cur_val_buf--;
 	}
 	BUG_ON(sw_context->cur_val_buf != 0);
 
-	/*
-	 * Drop references to resources held during command submission.
-	 */
-	vmw_resource_unreserve(&sw_context->resource_list);
-	list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
-				 validate_head) {
-		list_del_init(&res->validate_head);
-		vmw_resource_unreference(&res);
-	}
+	list_for_each_entry(val, &sw_context->resource_list, head)
+		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
 }
 
 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
@@ -929,7 +1245,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	 * used as a GMR, this will return -ENOMEM.
 	 */
 
-	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
 	if (likely(ret == 0 || ret == -ERESTARTSYS))
 		return ret;
 
@@ -939,7 +1255,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	 */
 
 	DRM_INFO("Falling through to VRAM.\n");
-	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
 	return ret;
 }
 
@@ -947,11 +1263,11 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 static int vmw_validate_buffers(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context)
 {
-	struct ttm_validate_buffer *entry;
+	struct vmw_validate_buffer *entry;
 	int ret;
 
-	list_for_each_entry(entry, &sw_context->validate_nodes, head) {
-		ret = vmw_validate_single_buffer(dev_priv, entry->bo);
+	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
+		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
 		if (unlikely(ret != 0))
 			return ret;
 	}
@@ -1114,6 +1430,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 {
 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
 	struct vmw_fence_obj *fence = NULL;
+	struct vmw_resource *error_resource;
+	struct list_head resource_list;
 	uint32_t handle;
 	void *cmd;
 	int ret;
@@ -1143,24 +1461,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		sw_context->kernel = true;
 
 	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
-	sw_context->cid_valid = false;
-	sw_context->sid_valid = false;
 	sw_context->cur_reloc = 0;
 	sw_context->cur_val_buf = 0;
 	sw_context->fence_flags = 0;
-	INIT_LIST_HEAD(&sw_context->query_list);
 	INIT_LIST_HEAD(&sw_context->resource_list);
 	sw_context->cur_query_bo = dev_priv->pinned_bo;
-	sw_context->cur_query_cid = dev_priv->query_cid;
-	sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);
-
+	sw_context->last_query_ctx = NULL;
+	sw_context->needs_post_query_barrier = false;
+	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
 	INIT_LIST_HEAD(&sw_context->validate_nodes);
+	INIT_LIST_HEAD(&sw_context->res_relocations);
+	if (!sw_context->res_ht_initialized) {
+		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
+		if (unlikely(ret != 0))
+			goto out_unlock;
+		sw_context->res_ht_initialized = true;
+	}
 
+	INIT_LIST_HEAD(&resource_list);
 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
 				command_size);
 	if (unlikely(ret != 0))
 		goto out_err;
 
+	ret = vmw_resources_reserve(sw_context);
+	if (unlikely(ret != 0))
+		goto out_err;
+
 	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
 	if (unlikely(ret != 0))
 		goto out_err;
@@ -1169,24 +1496,31 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	if (unlikely(ret != 0))
 		goto out_err;
 
-	vmw_apply_relocations(sw_context);
+	ret = vmw_resources_validate(sw_context);
+	if (unlikely(ret != 0))
+		goto out_err;
 
 	if (throttle_us) {
 		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
 				   throttle_us);
 
 		if (unlikely(ret != 0))
-			goto out_throttle;
+			goto out_err;
 	}
 
 	cmd = vmw_fifo_reserve(dev_priv, command_size);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving fifo space for commands.\n");
 		ret = -ENOMEM;
-		goto out_throttle;
+		goto out_err;
 	}
 
+	vmw_apply_relocations(sw_context);
 	memcpy(cmd, kernel_commands, command_size);
+
+	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+	vmw_resource_relocations_free(&sw_context->res_relocations);
+
 	vmw_fifo_commit(dev_priv, command_size);
 
 	vmw_query_bo_switch_commit(dev_priv, sw_context);
@@ -1202,9 +1536,14 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	if (ret != 0)
 		DRM_ERROR("Fence submission error. Syncing.\n");
 
+	vmw_resource_list_unreserve(&sw_context->resource_list, false);
 	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
 				    (void *) fence);
 
+	if (unlikely(dev_priv->pinned_bo != NULL &&
+		     !dev_priv->query_cid_valid))
+		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
+
 	vmw_clear_validations(sw_context);
 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
 				    user_fence_rep, fence, handle);
@@ -1217,17 +1556,40 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		vmw_fence_obj_unreference(&fence);
 	}
 
+	list_splice_init(&sw_context->resource_list, &resource_list);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+	/*
+	 * Unreference resources outside of the cmdbuf_mutex to
+	 * avoid deadlocks in resource destruction paths.
+	 */
+	vmw_resource_list_unreference(&resource_list);
+
 	return 0;
 
 out_err:
+	vmw_resource_relocations_free(&sw_context->res_relocations);
 	vmw_free_relocations(sw_context);
-out_throttle:
-	vmw_query_switch_backoff(sw_context);
 	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
+	vmw_resource_list_unreserve(&sw_context->resource_list, true);
 	vmw_clear_validations(sw_context);
+	if (unlikely(dev_priv->pinned_bo != NULL &&
+		     !dev_priv->query_cid_valid))
+		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 out_unlock:
+	list_splice_init(&sw_context->resource_list, &resource_list);
+	error_resource = sw_context->error_resource;
+	sw_context->error_resource = NULL;
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+	/*
+	 * Unreference resources outside of the cmdbuf_mutex to
+	 * avoid deadlocks in resource destruction paths.
+	 */
+	vmw_resource_list_unreference(&resource_list);
+	if (unlikely(error_resource != NULL))
+		vmw_resource_unreference(&error_resource);
+
 	return ret;
 }
 
@@ -1251,14 +1613,101 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
 }
 
 
+/**
+ * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * query bo.
+ *
+ * @dev_priv: The device private structure.
+ * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
+ * _after_ a query barrier that flushes all queries touching the current
+ * buffer pointed to by @dev_priv->pinned_bo.
+ *
+ * This function should be used to unpin the pinned query bo, or
+ * as a query barrier when we need to make sure that all queries have
+ * finished before the next fifo command. (For example on hardware
+ * context destruction, where the hardware may otherwise leak unfinished
+ * queries.)
+ *
+ * This function does not return any failure codes, but makes attempts
+ * to unpin safely in case of errors.
+ *
+ * The function will synchronize on the previous query barrier, and will
+ * thus not finish until that barrier has executed.
+ *
+ * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
+ * before calling this function.
+ */
+void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+				     struct vmw_fence_obj *fence)
+{
+	int ret = 0;
+	struct list_head validate_list;
+	struct ttm_validate_buffer pinned_val, query_val;
+	struct vmw_fence_obj *lfence = NULL;
+
+	if (dev_priv->pinned_bo == NULL)
+		goto out_unlock;
+
+	INIT_LIST_HEAD(&validate_list);
+
+	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+	list_add_tail(&pinned_val.head, &validate_list);
+
+	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+	list_add_tail(&query_val.head, &validate_list);
+
+	do {
+		ret = ttm_eu_reserve_buffers(&validate_list);
+	} while (ret == -ERESTARTSYS);
+
+	if (unlikely(ret != 0)) {
+		vmw_execbuf_unpin_panic(dev_priv);
+		goto out_no_reserve;
+	}
+
+	if (dev_priv->query_cid_valid) {
+		BUG_ON(fence != NULL);
+		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
+		if (unlikely(ret != 0)) {
+			vmw_execbuf_unpin_panic(dev_priv);
+			goto out_no_emit;
+		}
+		dev_priv->query_cid_valid = false;
+	}
+
+	vmw_bo_pin(dev_priv->pinned_bo, false);
+	vmw_bo_pin(dev_priv->dummy_query_bo, false);
+	dev_priv->dummy_query_bo_pinned = false;
+
+	if (fence == NULL) {
+		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
+						  NULL);
+		fence = lfence;
+	}
+	ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
+	if (lfence != NULL)
+		vmw_fence_obj_unreference(&lfence);
+
+	ttm_bo_unref(&query_val.bo);
+	ttm_bo_unref(&pinned_val.bo);
+	ttm_bo_unref(&dev_priv->pinned_bo);
+
+out_unlock:
+	return;
+
+out_no_emit:
+	ttm_eu_backoff_reservation(&validate_list);
+out_no_reserve:
+	ttm_bo_unref(&query_val.bo);
+	ttm_bo_unref(&pinned_val.bo);
+	ttm_bo_unref(&dev_priv->pinned_bo);
+}
+
 /**
  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
  * query bo.
  *
  * @dev_priv: The device private structure.
- * @only_on_cid_match: Only flush and unpin if the current active query cid
- * matches @cid.
- * @cid: Optional context id to match.
  *
  * This function should be used to unpin the pinned query bo, or
  * as a query barrier when we need to make sure that all queries have
@@ -1272,69 +1721,11 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
  * The function will synchronize on the previous query barrier, and will
  * thus not finish until that barrier has executed.
  */
-void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
-				   bool only_on_cid_match, uint32_t cid)
+void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
 {
-	int ret = 0;
-	struct list_head validate_list;
-	struct ttm_validate_buffer pinned_val, query_val;
-	struct vmw_fence_obj *fence;
-
 	mutex_lock(&dev_priv->cmdbuf_mutex);
-
-	if (dev_priv->pinned_bo == NULL)
-		goto out_unlock;
-
-	if (only_on_cid_match && cid != dev_priv->query_cid)
-		goto out_unlock;
-
-	INIT_LIST_HEAD(&validate_list);
-
-	pinned_val.new_sync_obj_arg = (void *)(unsigned long)
-		DRM_VMW_FENCE_FLAG_EXEC;
-	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
-	list_add_tail(&pinned_val.head, &validate_list);
-
-	query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
-	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
-	list_add_tail(&query_val.head, &validate_list);
-
-	do {
-		ret = ttm_eu_reserve_buffers(&validate_list);
-	} while (ret == -ERESTARTSYS);
-
-	if (unlikely(ret != 0)) {
-		vmw_execbuf_unpin_panic(dev_priv);
-		goto out_no_reserve;
-	}
-
-	ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
-	if (unlikely(ret != 0)) {
-		vmw_execbuf_unpin_panic(dev_priv);
-		goto out_no_emit;
-	}
-
-	vmw_bo_pin(dev_priv->pinned_bo, false);
-	vmw_bo_pin(dev_priv->dummy_query_bo, false);
-	dev_priv->dummy_query_bo_pinned = false;
-
-	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-	ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
-
-	ttm_bo_unref(&query_val.bo);
-	ttm_bo_unref(&pinned_val.bo);
-	ttm_bo_unref(&dev_priv->pinned_bo);
-
-out_unlock:
-	mutex_unlock(&dev_priv->cmdbuf_mutex);
-	return;
-
-out_no_emit:
-	ttm_eu_backoff_reservation(&validate_list);
-out_no_reserve:
-	ttm_bo_unref(&query_val.bo);
-	ttm_bo_unref(&pinned_val.bo);
-	ttm_bo_unref(&dev_priv->pinned_bo);
+	if (dev_priv->query_cid_valid)
+		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index bc187fafd58c..c62d20e8a6f1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -537,7 +537,7 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
 		container_of(fence, struct vmw_user_fence, fence);
 	struct vmw_fence_manager *fman = fence->fman;
 
-	kfree(ufence);
+	ttm_base_object_kfree(ufence, base);
 	/*
 	 * Free kernel space accounting.
 	 */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 7290811f89be..d9fbbe191071 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -133,6 +133,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
 	struct drm_vmw_rect *clips = NULL;
 	struct drm_mode_object *obj;
 	struct vmw_framebuffer *vfb;
+	struct vmw_resource *res;
 	uint32_t num_clips;
 	int ret;
 
@@ -180,11 +181,13 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
 	if (unlikely(ret != 0))
 		goto out_no_ttm_lock;
 
-	ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid,
-					     &surface);
+	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
+					      user_surface_converter,
+					      &res);
 	if (ret)
 		goto out_no_surface;
 
+	surface = vmw_res_to_srf(res);
 	ret = vmw_kms_present(dev_priv, file_priv,
 			      vfb, surface, arg->sid,
 			      arg->dest_x, arg->dest_y,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 070fb239c5af..79f7e8e60529 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -373,7 +373,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 
 	drm_mode_crtc_set_gamma_size(crtc, 256);
 
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				      dev->mode_config.dirty_info_property,
 				      1);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index cb55b7b66377..87e39f68e9d0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -35,6 +35,7 @@
 #include "svga_escape.h"
 
 #define VMW_MAX_NUM_STREAMS 1
+#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
 
 struct vmw_stream {
 	struct vmw_dma_buffer *buf;
@@ -449,6 +450,14 @@ int vmw_overlay_pause_all(struct vmw_private *dev_priv)
 	return 0;
 }
 
+
+static bool vmw_overlay_available(const struct vmw_private *dev_priv)
+{
+	return (dev_priv->overlay_priv != NULL &&
+		((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
+		 VMW_OVERLAY_CAP_MASK));
+}
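The mask test requires both FIFO capabilities at once; note that the init-time check removed further down rejected only the case where SVGA_FIFO_CAP_VIDEO was missing while SVGA_FIFO_CAP_ESCAPE was present. Illustrative evaluations:

/*
 * (caps & VMW_OVERLAY_CAP_MASK) == VMW_OVERLAY_CAP_MASK
 *
 * caps = SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE  -> available
 * caps = SVGA_FIFO_CAP_VIDEO only                    -> not available
 * caps = SVGA_FIFO_CAP_ESCAPE only                   -> not available
 */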
+
 int vmw_overlay_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
 {
@@ -461,7 +470,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
 	struct vmw_resource *res;
 	int ret;
 
-	if (!overlay)
+	if (!vmw_overlay_available(dev_priv))
 		return -ENOSYS;
 
 	ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
@@ -492,7 +501,7 @@ out_unlock:
 
 int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
 {
-	if (!dev_priv->overlay_priv)
+	if (!vmw_overlay_available(dev_priv))
 		return 0;
 
 	return VMW_MAX_NUM_STREAMS;
@@ -503,7 +512,7 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
 	struct vmw_overlay *overlay = dev_priv->overlay_priv;
 	int i, k;
 
-	if (!overlay)
+	if (!vmw_overlay_available(dev_priv))
 		return 0;
 
 	mutex_lock(&overlay->mutex);
@@ -569,12 +578,6 @@ int vmw_overlay_init(struct vmw_private *dev_priv)
 	if (dev_priv->overlay_priv)
 		return -EINVAL;
 
-	if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) &&
-	     (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
-		DRM_INFO("hardware doesn't support overlays\n");
-		return -ENOSYS;
-	}
-
 	overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
 	if (!overlay)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index da3c6b5b98a1..e01a17b407b2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -30,17 +30,7 @@
 #include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drmP.h>
-
-struct vmw_user_context {
-	struct ttm_base_object base;
-	struct vmw_resource res;
-};
-
-struct vmw_user_surface {
-	struct ttm_base_object base;
-	struct vmw_surface srf;
-	uint32_t size;
-};
+#include "vmwgfx_resource_priv.h"
 
 struct vmw_user_dma_buffer {
 	struct ttm_base_object base;
@@ -62,17 +52,21 @@ struct vmw_user_stream {
 	struct vmw_stream stream;
 };
 
-struct vmw_surface_offset {
-	uint32_t face;
-	uint32_t mip;
-	uint32_t bo_offset;
-};
 
-
-static uint64_t vmw_user_context_size;
-static uint64_t vmw_user_surface_size;
 static uint64_t vmw_user_stream_size;
 
+static const struct vmw_res_func vmw_stream_func = {
+	.res_type = vmw_res_stream,
+	.needs_backup = false,
+	.may_evict = false,
+	.type_name = "video streams",
+	.backup_placement = NULL,
+	.create = NULL,
+	.destroy = NULL,
+	.bind = NULL,
+	.unbind = NULL
+};
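Streams need no device-side backing store, so the create/destroy/bind/unbind callbacks are all NULL. For contrast, a hypothetical table for a resource type that does use a backup buffer could look like the sketch below; every name here is illustrative and not part of this patch:

static const struct vmw_res_func example_backed_func = {
	.res_type = vmw_res_surface,
	.needs_backup = true,            /* contents live in a backup bo */
	.may_evict = true,               /* eligible for the LRU lists */
	.type_name = "example surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = example_create,        /* allocate the device resource */
	.destroy = example_destroy,      /* release the device resource */
	.bind = example_bind,            /* DMA backup bo -> device */
	.unbind = example_unbind         /* DMA device -> backup bo */
};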
+
 static inline struct vmw_dma_buffer *
 vmw_dma_buffer(struct ttm_buffer_object *bo)
 {
@@ -100,13 +94,14 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  *
  * Release the resource id to the resource id manager and set it to -1
  */
-static void vmw_resource_release_id(struct vmw_resource *res)
+void vmw_resource_release_id(struct vmw_resource *res)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
+	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
 	write_lock(&dev_priv->resource_lock);
 	if (res->id != -1)
-		idr_remove(res->idr, res->id);
+		idr_remove(idr, res->id);
 	res->id = -1;
 	write_unlock(&dev_priv->resource_lock);
 }
@@ -116,17 +111,33 @@ static void vmw_resource_release(struct kref *kref)
 	struct vmw_resource *res =
 	    container_of(kref, struct vmw_resource, kref);
 	struct vmw_private *dev_priv = res->dev_priv;
-	int id = res->id;
-	struct idr *idr = res->idr;
+	int id;
+	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
 	res->avail = false;
-	if (res->remove_from_lists != NULL)
-		res->remove_from_lists(res);
+	list_del_init(&res->lru_head);
 	write_unlock(&dev_priv->resource_lock);
+	if (res->backup) {
+		struct ttm_buffer_object *bo = &res->backup->base;
+
+		ttm_bo_reserve(bo, false, false, false, 0);
+		if (!list_empty(&res->mob_head) &&
+		    res->func->unbind != NULL) {
+			struct ttm_validate_buffer val_buf;
+
+			val_buf.bo = bo;
+			res->func->unbind(res, false, &val_buf);
+		}
+		res->backup_dirty = false;
+		list_del_init(&res->mob_head);
+		ttm_bo_unreserve(bo);
+		vmw_dmabuf_unreference(&res->backup);
+	}
 
 	if (likely(res->hw_destroy != NULL))
 		res->hw_destroy(res);
 
+	id = res->id;
 	if (res->res_free != NULL)
 		res->res_free(res);
 	else
@@ -153,25 +164,25 @@ void vmw_resource_unreference(struct vmw_resource **p_res)
 /**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
  *
- * @dev_priv: Pointer to the device private structure.
  * @res: Pointer to the resource.
  *
  * Allocate the lowest free resource from the resource manager, and set
  * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
  */
-static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
-				 struct vmw_resource *res)
+int vmw_resource_alloc_id(struct vmw_resource *res)
 {
+	struct vmw_private *dev_priv = res->dev_priv;
 	int ret;
+	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
 	BUG_ON(res->id != -1);
 
 	do {
-		if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
+		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
 			return -ENOMEM;
 
 		write_lock(&dev_priv->resource_lock);
-		ret = idr_get_new_above(res->idr, res, 1, &res->id);
+		ret = idr_get_new_above(idr, res, 1, &res->id);
 		write_unlock(&dev_priv->resource_lock);
 
 	} while (ret == -EAGAIN);
@@ -179,31 +190,39 @@ static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
 	return ret;
 }
 
-
-static int vmw_resource_init(struct vmw_private *dev_priv,
-			     struct vmw_resource *res,
-			     struct idr *idr,
-			     enum ttm_object_type obj_type,
-			     bool delay_id,
-			     void (*res_free) (struct vmw_resource *res),
-			     void (*remove_from_lists)
-			     (struct vmw_resource *res))
+/**
+ * vmw_resource_init - initialize a struct vmw_resource
+ *
+ * @dev_priv:       Pointer to a device private struct.
+ * @res:            The struct vmw_resource to initialize.
+ * @delay_id:       Boolean whether to defer device id allocation until
+ *                  the first validation.
+ * @res_free:       Resource destructor.
+ * @func:           Resource function table.
+ */
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+		      bool delay_id,
+		      void (*res_free) (struct vmw_resource *res),
+		      const struct vmw_res_func *func)
 {
 	kref_init(&res->kref);
 	res->hw_destroy = NULL;
 	res->res_free = res_free;
-	res->remove_from_lists = remove_from_lists;
-	res->res_type = obj_type;
-	res->idr = idr;
 	res->avail = false;
 	res->dev_priv = dev_priv;
-	INIT_LIST_HEAD(&res->query_head);
-	INIT_LIST_HEAD(&res->validate_head);
+	res->func = func;
+	INIT_LIST_HEAD(&res->lru_head);
+	INIT_LIST_HEAD(&res->mob_head);
 	res->id = -1;
+	res->backup = NULL;
+	res->backup_offset = 0;
+	res->backup_dirty = false;
+	res->res_dirty = false;
 	if (delay_id)
 		return 0;
 	else
-		return vmw_resource_alloc_id(dev_priv, res);
+		return vmw_resource_alloc_id(res);
 }
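A sketch of the two initialization modes; the first line mirrors vmw_stream_init() further down, the second uses the hypothetical example_backed_func from the sketch above:

/* Immediate id allocation (streams): */
ret = vmw_resource_init(dev_priv, res, false, res_free, &vmw_stream_func);

/* Deferred: no device id until the resource is first validated. */
ret = vmw_resource_init(dev_priv, res, true, res_free, &example_backed_func);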
 
 /**
@@ -218,9 +237,8 @@ static int vmw_resource_init(struct vmw_private *dev_priv,
  * Activate basically means that the function vmw_resource_lookup will
  * find it.
  */
-
-static void vmw_resource_activate(struct vmw_resource *res,
-				  void (*hw_destroy) (struct vmw_resource *))
+void vmw_resource_activate(struct vmw_resource *res,
+			   void (*hw_destroy) (struct vmw_resource *))
 {
 	struct vmw_private *dev_priv = res->dev_priv;
 
@@ -250,946 +268,57 @@ struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
 }
 
 /**
- * Context management:
+ * vmw_user_resource_lookup_handle - lookup a struct vmw_resource from a
+ * TTM user-space handle and perform basic type checks
+ *
+ * @dev_priv:     Pointer to a device private struct
+ * @tfile:        Pointer to a struct ttm_object_file identifying the caller
+ * @handle:       The TTM user-space handle
+ * @converter:    Pointer to an object describing the resource type
+ * @p_res:        On successful return the location pointed to will contain
+ *                a pointer to a refcounted struct vmw_resource.
+ *
+ * If the handle can't be found or is associated with an incorrect resource
+ * type, -EINVAL will be returned.
  */
-
-static void vmw_hw_context_destroy(struct vmw_resource *res)
+int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
+				    struct ttm_object_file *tfile,
+				    uint32_t handle,
+				    const struct vmw_user_resource_conv
+				    *converter,
+				    struct vmw_resource **p_res)
 {
-
-	struct vmw_private *dev_priv = res->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdDestroyContext body;
-	} *cmd;
-
-
-	vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);
-
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for surface "
-			  "destruction.\n");
-		return;
-	}
-
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
-	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-	cmd->body.cid = cpu_to_le32(res->id);
-
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	vmw_3d_resource_dec(dev_priv, false);
-}
-
-static int vmw_context_init(struct vmw_private *dev_priv,
-			    struct vmw_resource *res,
-			    void (*res_free) (struct vmw_resource *res))
-{
-	int ret;
-
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdDefineContext body;
-	} *cmd;
-
-	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
-				VMW_RES_CONTEXT, false, res_free, NULL);
-
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed to allocate a resource id.\n");
-		goto out_early;
-	}
-
-	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
-		DRM_ERROR("Out of hw context ids.\n");
-		vmw_resource_unreference(&res);
-		return -ENOMEM;
-	}
-
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Fifo reserve failed.\n");
-		vmw_resource_unreference(&res);
-		return -ENOMEM;
-	}
-
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
-	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-	cmd->body.cid = cpu_to_le32(res->id);
-
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	(void) vmw_3d_resource_inc(dev_priv, false);
-	vmw_resource_activate(res, vmw_hw_context_destroy);
-	return 0;
-
-out_early:
-	if (res_free == NULL)
-		kfree(res);
-	else
-		res_free(res);
-	return ret;
-}
-
-struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
-{
-	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
-	int ret;
-
-	if (unlikely(res == NULL))
-		return NULL;
-
-	ret = vmw_context_init(dev_priv, res, NULL);
-	return (ret == 0) ? res : NULL;
-}
-
-/**
- * User-space context management:
- */
-
-static void vmw_user_context_free(struct vmw_resource *res)
-{
-	struct vmw_user_context *ctx =
-	    container_of(res, struct vmw_user_context, res);
-	struct vmw_private *dev_priv = res->dev_priv;
-
-	kfree(ctx);
-	ttm_mem_global_free(vmw_mem_glob(dev_priv),
-			    vmw_user_context_size);
-}
-
-/**
- * This function is called when user space has no more references on the
- * base object. It releases the base-object's reference on the resource object.
- */
-
-static void vmw_user_context_base_release(struct ttm_base_object **p_base)
-{
-	struct ttm_base_object *base = *p_base;
-	struct vmw_user_context *ctx =
-	    container_of(base, struct vmw_user_context, base);
-	struct vmw_resource *res = &ctx->res;
-
-	*p_base = NULL;
-	vmw_resource_unreference(&res);
-}
-
-int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv)
-{
-	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct ttm_base_object *base;
 	struct vmw_resource *res;
-	struct vmw_user_context *ctx;
-	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
-	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	int ret = 0;
+	int ret = -EINVAL;
 
-	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
-	if (unlikely(res == NULL))
+	base = ttm_base_object_lookup(tfile, handle);
+	if (unlikely(base == NULL))
 		return -EINVAL;
 
-	if (res->res_free != &vmw_user_context_free) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (unlikely(base->object_type != converter->object_type))
+		goto out_bad_resource;
 
-	ctx = container_of(res, struct vmw_user_context, res);
-	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
-		ret = -EPERM;
-		goto out;
-	}
-
-	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
-out:
-	vmw_resource_unreference(&res);
-	return ret;
-}
-
-int vmw_context_define_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv)
-{
-	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_user_context *ctx;
-	struct vmw_resource *res;
-	struct vmw_resource *tmp;
-	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
-	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
-	int ret;
-
-
-	/*
-	 * Approximate idr memory usage with 128 bytes. It will be limited
-	 * by maximum number_of contexts anyway.
-	 */
-
-	if (unlikely(vmw_user_context_size == 0))
-		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
-
-	ret = ttm_read_lock(&vmaster->lock, true);
-	if (unlikely(ret != 0))
-		return ret;
-
-	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-				   vmw_user_context_size,
-				   false, true);
-	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Out of graphics memory for context"
-				  " creation.\n");
-		goto out_unlock;
-	}
-
-	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
-	if (unlikely(ctx == NULL)) {
-		ttm_mem_global_free(vmw_mem_glob(dev_priv),
-				    vmw_user_context_size);
-		ret = -ENOMEM;
-		goto out_unlock;
-	}
-
-	res = &ctx->res;
-	ctx->base.shareable = false;
-	ctx->base.tfile = NULL;
-
-	/*
-	 * From here on, the destructor takes over resource freeing.
-	 */
-
-	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
-	if (unlikely(ret != 0))
-		goto out_unlock;
-
-	tmp = vmw_resource_reference(&ctx->res);
-	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
-				   &vmw_user_context_base_release, NULL);
-
-	if (unlikely(ret != 0)) {
-		vmw_resource_unreference(&tmp);
-		goto out_err;
-	}
-
-	arg->cid = res->id;
-out_err:
-	vmw_resource_unreference(&res);
-out_unlock:
-	ttm_read_unlock(&vmaster->lock);
-	return ret;
-
-}
-
-int vmw_context_check(struct vmw_private *dev_priv,
-		      struct ttm_object_file *tfile,
-		      int id,
-		      struct vmw_resource **p_res)
-{
-	struct vmw_resource *res;
-	int ret = 0;
+	res = converter->base_obj_to_res(base);
 
 	read_lock(&dev_priv->resource_lock);
-	res = idr_find(&dev_priv->context_idr, id);
-	if (res && res->avail) {
-		struct vmw_user_context *ctx =
-			container_of(res, struct vmw_user_context, res);
-		if (ctx->base.tfile != tfile && !ctx->base.shareable)
-			ret = -EPERM;
-		if (p_res)
-			*p_res = vmw_resource_reference(res);
-	} else
-		ret = -EINVAL;
+	if (!res->avail || res->res_free != converter->res_free) {
+		read_unlock(&dev_priv->resource_lock);
+		goto out_bad_resource;
+	}
+
+	kref_get(&res->kref);
 	read_unlock(&dev_priv->resource_lock);
 
-	return ret;
-}
+	*p_res = res;
+	ret = 0;
 
-struct vmw_bpp {
-	uint8_t bpp;
-	uint8_t s_bpp;
-};
-
-/*
- * Size table for the supported SVGA3D surface formats. It consists of
- * two values. The bpp value and the s_bpp value which is short for
- * "stride bits per pixel" The values are given in such a way that the
- * minimum stride for the image is calculated using
- *
- * min_stride = w*s_bpp
- *
- * and the total memory requirement for the image is
- *
- * h*min_stride*bpp/s_bpp
- *
- */
-static const struct vmw_bpp vmw_sf_bpp[] = {
-	[SVGA3D_FORMAT_INVALID] = {0, 0},
-	[SVGA3D_X8R8G8B8] = {32, 32},
-	[SVGA3D_A8R8G8B8] = {32, 32},
-	[SVGA3D_R5G6B5] = {16, 16},
-	[SVGA3D_X1R5G5B5] = {16, 16},
-	[SVGA3D_A1R5G5B5] = {16, 16},
-	[SVGA3D_A4R4G4B4] = {16, 16},
-	[SVGA3D_Z_D32] = {32, 32},
-	[SVGA3D_Z_D16] = {16, 16},
-	[SVGA3D_Z_D24S8] = {32, 32},
-	[SVGA3D_Z_D15S1] = {16, 16},
-	[SVGA3D_LUMINANCE8] = {8, 8},
-	[SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
-	[SVGA3D_LUMINANCE16] = {16, 16},
-	[SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
-	[SVGA3D_DXT1] = {4, 16},
-	[SVGA3D_DXT2] = {8, 32},
-	[SVGA3D_DXT3] = {8, 32},
-	[SVGA3D_DXT4] = {8, 32},
-	[SVGA3D_DXT5] = {8, 32},
-	[SVGA3D_BUMPU8V8] = {16, 16},
-	[SVGA3D_BUMPL6V5U5] = {16, 16},
-	[SVGA3D_BUMPX8L8V8U8] = {32, 32},
-	[SVGA3D_ARGB_S10E5] = {16, 16},
-	[SVGA3D_ARGB_S23E8] = {32, 32},
-	[SVGA3D_A2R10G10B10] = {32, 32},
-	[SVGA3D_V8U8] = {16, 16},
-	[SVGA3D_Q8W8V8U8] = {32, 32},
-	[SVGA3D_CxV8U8] = {16, 16},
-	[SVGA3D_X8L8V8U8] = {32, 32},
-	[SVGA3D_A2W10V10U10] = {32, 32},
-	[SVGA3D_ALPHA8] = {8, 8},
-	[SVGA3D_R_S10E5] = {16, 16},
-	[SVGA3D_R_S23E8] = {32, 32},
-	[SVGA3D_RG_S10E5] = {16, 16},
-	[SVGA3D_RG_S23E8] = {32, 32},
-	[SVGA3D_BUFFER] = {8, 8},
-	[SVGA3D_Z_D24X8] = {32, 32},
-	[SVGA3D_V16U16] = {32, 32},
-	[SVGA3D_G16R16] = {32, 32},
-	[SVGA3D_A16B16G16R16] = {64,  64},
-	[SVGA3D_UYVY] = {12, 12},
-	[SVGA3D_YUY2] = {12, 12},
-	[SVGA3D_NV12] = {12, 8},
-	[SVGA3D_AYUV] = {32, 32},
-	[SVGA3D_BC4_UNORM] = {4,  16},
-	[SVGA3D_BC5_UNORM] = {8,  32},
-	[SVGA3D_Z_DF16] = {16,  16},
-	[SVGA3D_Z_DF24] = {24,  24},
-	[SVGA3D_Z_D24S8_INT] = {32,  32}
-};
-
-
-/**
- * Surface management.
- */
-
-struct vmw_surface_dma {
-	SVGA3dCmdHeader header;
-	SVGA3dCmdSurfaceDMA body;
-	SVGA3dCopyBox cb;
-	SVGA3dCmdSurfaceDMASuffix suffix;
-};
-
-struct vmw_surface_define {
-	SVGA3dCmdHeader header;
-	SVGA3dCmdDefineSurface body;
-};
-
-struct vmw_surface_destroy {
-	SVGA3dCmdHeader header;
-	SVGA3dCmdDestroySurface body;
-};
-
-
-/**
- * vmw_surface_dma_size - Compute fifo size for a dma command.
- *
- * @srf: Pointer to a struct vmw_surface
- *
- * Computes the required size for a surface dma command for backup or
- * restoration of the surface represented by @srf.
- */
-static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
-{
-	return srf->num_sizes * sizeof(struct vmw_surface_dma);
-}
-
-
-/**
- * vmw_surface_define_size - Compute fifo size for a surface define command.
- *
- * @srf: Pointer to a struct vmw_surface
- *
- * Computes the required size for a surface define command for the definition
- * of the surface represented by @srf.
- */
-static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
-{
-	return sizeof(struct vmw_surface_define) + srf->num_sizes *
-		sizeof(SVGA3dSize);
-}
-
-
-/**
- * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
- *
- * Computes the required size for a surface destroy command for the destruction
- * of a hw surface.
- */
-static inline uint32_t vmw_surface_destroy_size(void)
-{
-	return sizeof(struct vmw_surface_destroy);
-}
-
-/**
- * vmw_surface_destroy_encode - Encode a surface_destroy command.
- *
- * @id: The surface id
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- */
-static void vmw_surface_destroy_encode(uint32_t id,
-				       void *cmd_space)
-{
-	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
-		cmd_space;
-
-	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.sid = id;
-}
-
-/**
- * vmw_surface_define_encode - Encode a surface_define command.
- *
- * @srf: Pointer to a struct vmw_surface object.
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- */
-static void vmw_surface_define_encode(const struct vmw_surface *srf,
-				      void *cmd_space)
-{
-	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
-		cmd_space;
-	struct drm_vmw_size *src_size;
-	SVGA3dSize *cmd_size;
-	uint32_t cmd_len;
-	int i;
-
-	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
-
-	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
-	cmd->header.size = cmd_len;
-	cmd->body.sid = srf->res.id;
-	cmd->body.surfaceFlags = srf->flags;
-	cmd->body.format = cpu_to_le32(srf->format);
-	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
-		cmd->body.face[i].numMipLevels = srf->mip_levels[i];
-
-	cmd += 1;
-	cmd_size = (SVGA3dSize *) cmd;
-	src_size = srf->sizes;
-
-	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
-		cmd_size->width = src_size->width;
-		cmd_size->height = src_size->height;
-		cmd_size->depth = src_size->depth;
-	}
-}
-
-
-/**
- * vmw_surface_dma_encode - Encode a surface_dma command.
- *
- * @srf: Pointer to a struct vmw_surface object.
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
- * should be placed or read from.
- * @to_surface: Boolean whether to DMA to the surface or from the surface.
- */
-static void vmw_surface_dma_encode(struct vmw_surface *srf,
-				   void *cmd_space,
-				   const SVGAGuestPtr *ptr,
-				   bool to_surface)
-{
-	uint32_t i;
-	uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
-	uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
-	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
-
-	for (i = 0; i < srf->num_sizes; ++i) {
-		SVGA3dCmdHeader *header = &cmd->header;
-		SVGA3dCmdSurfaceDMA *body = &cmd->body;
-		SVGA3dCopyBox *cb = &cmd->cb;
-		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
-		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
-		const struct drm_vmw_size *cur_size = &srf->sizes[i];
-
-		header->id = SVGA_3D_CMD_SURFACE_DMA;
-		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
-
-		body->guest.ptr = *ptr;
-		body->guest.ptr.offset += cur_offset->bo_offset;
-		body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
-		body->host.sid = srf->res.id;
-		body->host.face = cur_offset->face;
-		body->host.mipmap = cur_offset->mip;
-		body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
-				  SVGA3D_READ_HOST_VRAM);
-		cb->x = 0;
-		cb->y = 0;
-		cb->z = 0;
-		cb->srcx = 0;
-		cb->srcy = 0;
-		cb->srcz = 0;
-		cb->w = cur_size->width;
-		cb->h = cur_size->height;
-		cb->d = cur_size->depth;
-
-		suffix->suffixSize = sizeof(*suffix);
-		suffix->maximumOffset = body->guest.pitch*cur_size->height*
-			cur_size->depth*bpp / stride_bpp;
-		suffix->flags.discard = 0;
-		suffix->flags.unsynchronized = 0;
-		suffix->flags.reserved = 0;
-		++cmd;
-	}
-};
-
-
-static void vmw_hw_surface_destroy(struct vmw_resource *res)
-{
-
-	struct vmw_private *dev_priv = res->dev_priv;
-	struct vmw_surface *srf;
-	void *cmd;
-
-	if (res->id != -1) {
-
-		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
-		if (unlikely(cmd == NULL)) {
-			DRM_ERROR("Failed reserving FIFO space for surface "
-				  "destruction.\n");
-			return;
-		}
-
-		vmw_surface_destroy_encode(res->id, cmd);
-		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
-
-		/*
-		 * used_memory_size_atomic, or separate lock
-		 * to avoid taking dev_priv::cmdbuf_mutex in
-		 * the destroy path.
-		 */
-
-		mutex_lock(&dev_priv->cmdbuf_mutex);
-		srf = container_of(res, struct vmw_surface, res);
-		dev_priv->used_memory_size -= srf->backup_size;
-		mutex_unlock(&dev_priv->cmdbuf_mutex);
-
-	}
-	vmw_3d_resource_dec(dev_priv, false);
-}
-
-void vmw_surface_res_free(struct vmw_resource *res)
-{
-	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-
-	if (srf->backup)
-		ttm_bo_unref(&srf->backup);
-	kfree(srf->offsets);
-	kfree(srf->sizes);
-	kfree(srf->snooper.image);
-	kfree(srf);
-}
-
-
-/**
- * vmw_surface_do_validate - make a surface available to the device.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface.
- *
- * If the surface doesn't have a hw id, allocate one, and optionally
- * DMA the backed up surface contents to the device.
- *
- * Returns -EBUSY if there wasn't sufficient device resources to
- * complete the validation. Retry after freeing up resources.
- *
- * May return other errors if the kernel is out of guest resources.
- */
-int vmw_surface_do_validate(struct vmw_private *dev_priv,
-			    struct vmw_surface *srf)
-{
-	struct vmw_resource *res = &srf->res;
-	struct list_head val_list;
-	struct ttm_validate_buffer val_buf;
-	uint32_t submit_size;
-	uint8_t *cmd;
-	int ret;
-
-	if (likely(res->id != -1))
-		return 0;
-
-	if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
-		     dev_priv->memory_size))
-		return -EBUSY;
-
-	/*
-	 * Reserve- and validate the backup DMA bo.
-	 */
-
-	if (srf->backup) {
-		INIT_LIST_HEAD(&val_list);
-		val_buf.bo = ttm_bo_reference(srf->backup);
-		val_buf.new_sync_obj_arg = (void *)((unsigned long)
-						    DRM_VMW_FENCE_FLAG_EXEC);
-		list_add_tail(&val_buf.head, &val_list);
-		ret = ttm_eu_reserve_buffers(&val_list);
-		if (unlikely(ret != 0))
-			goto out_no_reserve;
-
-		ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
-				      true, false, false);
-		if (unlikely(ret != 0))
-			goto out_no_validate;
-	}
-
-	/*
-	 * Alloc id for the resource.
-	 */
-
-	ret = vmw_resource_alloc_id(dev_priv, res);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed to allocate a surface id.\n");
-		goto out_no_id;
-	}
-	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
-		ret = -EBUSY;
-		goto out_no_fifo;
-	}
-
-
-	/*
-	 * Encode surface define- and dma commands.
-	 */
-
-	submit_size = vmw_surface_define_size(srf);
-	if (srf->backup)
-		submit_size += vmw_surface_dma_size(srf);
-
-	cmd = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for surface "
-			  "validation.\n");
-		ret = -ENOMEM;
-		goto out_no_fifo;
-	}
-
-	vmw_surface_define_encode(srf, cmd);
-	if (srf->backup) {
-		SVGAGuestPtr ptr;
-
-		cmd += vmw_surface_define_size(srf);
-		vmw_bo_get_guest_ptr(srf->backup, &ptr);
-		vmw_surface_dma_encode(srf, cmd, &ptr, true);
-	}
-
-	vmw_fifo_commit(dev_priv, submit_size);
-
-	/*
-	 * Create a fence object and fence the backup buffer.
-	 */
-
-	if (srf->backup) {
-		struct vmw_fence_obj *fence;
-
-		(void) vmw_execbuf_fence_commands(NULL, dev_priv,
-						  &fence, NULL);
-		ttm_eu_fence_buffer_objects(&val_list, fence);
-		if (likely(fence != NULL))
-			vmw_fence_obj_unreference(&fence);
-		ttm_bo_unref(&val_buf.bo);
-		ttm_bo_unref(&srf->backup);
-	}
-
-	/*
-	 * Surface memory usage accounting.
-	 */
-
-	dev_priv->used_memory_size += srf->backup_size;
-
-	return 0;
-
-out_no_fifo:
-	vmw_resource_release_id(res);
-out_no_id:
-out_no_validate:
-	if (srf->backup)
-		ttm_eu_backoff_reservation(&val_list);
-out_no_reserve:
-	if (srf->backup)
-		ttm_bo_unref(&val_buf.bo);
-	return ret;
-}
-
-/**
- * vmw_surface_evict - Evict a hw surface.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface
- *
- * DMA the contents of a hw surface to a backup guest buffer object,
- * and destroy the hw surface, releasing its id.
- */
-int vmw_surface_evict(struct vmw_private *dev_priv,
-		      struct vmw_surface *srf)
-{
-	struct vmw_resource *res = &srf->res;
-	struct list_head val_list;
-	struct ttm_validate_buffer val_buf;
-	uint32_t submit_size;
-	uint8_t *cmd;
-	int ret;
-	struct vmw_fence_obj *fence;
-	SVGAGuestPtr ptr;
-
-	BUG_ON(res->id == -1);
-
-	/*
-	 * Create a surface backup buffer object.
-	 */
-
-	if (!srf->backup) {
-		ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
-				    ttm_bo_type_device,
-				    &vmw_srf_placement, 0, 0, true,
-				    NULL, &srf->backup);
-		if (unlikely(ret != 0))
-			return ret;
-	}
-
-	/*
-	 * Reserve- and validate the backup DMA bo.
-	 */
-
-	INIT_LIST_HEAD(&val_list);
-	val_buf.bo = ttm_bo_reference(srf->backup);
-	val_buf.new_sync_obj_arg = (void *)(unsigned long)
-		DRM_VMW_FENCE_FLAG_EXEC;
-	list_add_tail(&val_buf.head, &val_list);
-	ret = ttm_eu_reserve_buffers(&val_list);
-	if (unlikely(ret != 0))
-		goto out_no_reserve;
-
-	ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
-			      true, false, false);
-	if (unlikely(ret != 0))
-		goto out_no_validate;
-
-
-	/*
-	 * Encode the dma- and surface destroy commands.
-	 */
-
-	submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
-	cmd = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for surface "
-			  "eviction.\n");
-		ret = -ENOMEM;
-		goto out_no_fifo;
-	}
-
-	vmw_bo_get_guest_ptr(srf->backup, &ptr);
-	vmw_surface_dma_encode(srf, cmd, &ptr, false);
-	cmd += vmw_surface_dma_size(srf);
-	vmw_surface_destroy_encode(res->id, cmd);
-	vmw_fifo_commit(dev_priv, submit_size);
-
-	/*
-	 * Surface memory usage accounting.
-	 */
-
-	dev_priv->used_memory_size -= srf->backup_size;
-
-	/*
-	 * Create a fence object and fence the DMA buffer.
-	 */
-
-	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
-					  &fence, NULL);
-	ttm_eu_fence_buffer_objects(&val_list, fence);
-	if (likely(fence != NULL))
-		vmw_fence_obj_unreference(&fence);
-	ttm_bo_unref(&val_buf.bo);
-
-	/*
-	 * Release the surface ID.
-	 */
-
-	vmw_resource_release_id(res);
-
-	return 0;
-
-out_no_fifo:
-out_no_validate:
-	if (srf->backup)
-		ttm_eu_backoff_reservation(&val_list);
-out_no_reserve:
-	ttm_bo_unref(&val_buf.bo);
-	ttm_bo_unref(&srf->backup);
-	return ret;
-}
-
-
-/**
- * vmw_surface_validate - make a surface available to the device, evicting
- * other surfaces if needed.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface.
- *
- * Try to validate a surface and if it fails due to limited device resources,
- * repeatedly try to evict other surfaces until the request can be
- * acommodated.
- *
- * May return errors if out of resources.
- */
-int vmw_surface_validate(struct vmw_private *dev_priv,
-			 struct vmw_surface *srf)
-{
-	int ret;
-	struct vmw_surface *evict_srf;
-
-	do {
-		write_lock(&dev_priv->resource_lock);
-		list_del_init(&srf->lru_head);
-		write_unlock(&dev_priv->resource_lock);
-
-		ret = vmw_surface_do_validate(dev_priv, srf);
-		if (likely(ret != -EBUSY))
-			break;
-
-		write_lock(&dev_priv->resource_lock);
-		if (list_empty(&dev_priv->surface_lru)) {
-			DRM_ERROR("Out of device memory for surfaces.\n");
-			ret = -EBUSY;
-			write_unlock(&dev_priv->resource_lock);
-			break;
-		}
-
-		evict_srf = vmw_surface_reference
-			(list_first_entry(&dev_priv->surface_lru,
-					  struct vmw_surface,
-					  lru_head));
-		list_del_init(&evict_srf->lru_head);
-
-		write_unlock(&dev_priv->resource_lock);
-		(void) vmw_surface_evict(dev_priv, evict_srf);
-
-		vmw_surface_unreference(&evict_srf);
-
-	} while (1);
-
-	if (unlikely(ret != 0 && srf->res.id != -1)) {
-		write_lock(&dev_priv->resource_lock);
-		list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
-		write_unlock(&dev_priv->resource_lock);
-	}
+out_bad_resource:
+	ttm_base_object_unref(&base);
 
 	return ret;
 }
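The conversion in vmw_present_ioctl() above shows the intended calling pattern; condensed:

struct vmw_resource *res;
struct vmw_surface *surface;

ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
				      user_surface_converter, &res);
if (unlikely(ret != 0))
	return ret;

surface = vmw_res_to_srf(res);
/* ... use the surface ... */
vmw_resource_unreference(&res);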
 
-
-/**
- * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
- *
- * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
- *
- * As part of the resource destruction, remove the surface from any
- * lookup lists.
- */
-static void vmw_surface_remove_from_lists(struct vmw_resource *res)
-{
-	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-
-	list_del_init(&srf->lru_head);
-}
-
-int vmw_surface_init(struct vmw_private *dev_priv,
-		     struct vmw_surface *srf,
-		     void (*res_free) (struct vmw_resource *res))
-{
-	int ret;
-	struct vmw_resource *res = &srf->res;
-
-	BUG_ON(res_free == NULL);
-	INIT_LIST_HEAD(&srf->lru_head);
-	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
-				VMW_RES_SURFACE, true, res_free,
-				vmw_surface_remove_from_lists);
-
-	if (unlikely(ret != 0))
-		res_free(res);
-
-	/*
-	 * The surface won't be visible to hardware until a
-	 * surface validate.
-	 */
-
-	(void) vmw_3d_resource_inc(dev_priv, false);
-	vmw_resource_activate(res, vmw_hw_surface_destroy);
-	return ret;
-}
-
-static void vmw_user_surface_free(struct vmw_resource *res)
-{
-	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-	struct vmw_user_surface *user_srf =
-	    container_of(srf, struct vmw_user_surface, srf);
-	struct vmw_private *dev_priv = srf->res.dev_priv;
-	uint32_t size = user_srf->size;
-
-	if (srf->backup)
-		ttm_bo_unref(&srf->backup);
-	kfree(srf->offsets);
-	kfree(srf->sizes);
-	kfree(srf->snooper.image);
-	kfree(user_srf);
-	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-}
-
-/**
- * vmw_resource_unreserve - unreserve resources previously reserved for
- * command submission.
- *
- * @list_head: list of resources to unreserve.
- *
- * Currently only surfaces are considered, and unreserving a surface
- * means putting it back on the device's surface lru list,
- * so that it can be evicted if necessary.
- * This function traverses the resource list and
- * checks whether resources are surfaces, and in that case puts them back
- * on the device's surface LRU list.
- */
-void vmw_resource_unreserve(struct list_head *list)
-{
-	struct vmw_resource *res;
-	struct vmw_surface *srf;
-	rwlock_t *lock = NULL;
-
-	list_for_each_entry(res, list, validate_head) {
-
-		if (res->res_free != &vmw_surface_res_free &&
-		    res->res_free != &vmw_user_surface_free)
-			continue;
-
-		if (unlikely(lock == NULL)) {
-			lock = &res->dev_priv->resource_lock;
-			write_lock(lock);
-		}
-
-		srf = container_of(res, struct vmw_surface, res);
-		list_del_init(&srf->lru_head);
-		list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
-	}
-
-	if (lock != NULL)
-		write_unlock(lock);
-}
-
 /**
  * Helper function that looks up either a surface or a dmabuf.
  *
@@ -1201,342 +330,24 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 			   struct vmw_surface **out_surf,
 			   struct vmw_dma_buffer **out_buf)
 {
+	struct vmw_resource *res;
 	int ret;
 
 	BUG_ON(*out_surf || *out_buf);
 
-	ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
-	if (!ret)
+	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
+					      user_surface_converter,
+					      &res);
+	if (!ret) {
+		*out_surf = vmw_res_to_srf(res);
 		return 0;
+	}
 
+	*out_surf = NULL;
 	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
 	return ret;
 }
 
-
-int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
-				   struct ttm_object_file *tfile,
-				   uint32_t handle, struct vmw_surface **out)
-{
-	struct vmw_resource *res;
-	struct vmw_surface *srf;
-	struct vmw_user_surface *user_srf;
-	struct ttm_base_object *base;
-	int ret = -EINVAL;
-
-	base = ttm_base_object_lookup(tfile, handle);
-	if (unlikely(base == NULL))
-		return -EINVAL;
-
-	if (unlikely(base->object_type != VMW_RES_SURFACE))
-		goto out_bad_resource;
-
-	user_srf = container_of(base, struct vmw_user_surface, base);
-	srf = &user_srf->srf;
-	res = &srf->res;
-
-	read_lock(&dev_priv->resource_lock);
-
-	if (!res->avail || res->res_free != &vmw_user_surface_free) {
-		read_unlock(&dev_priv->resource_lock);
-		goto out_bad_resource;
-	}
-
-	kref_get(&res->kref);
-	read_unlock(&dev_priv->resource_lock);
-
-	*out = srf;
-	ret = 0;
-
-out_bad_resource:
-	ttm_base_object_unref(&base);
-
-	return ret;
-}
-
-static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
-{
-	struct ttm_base_object *base = *p_base;
-	struct vmw_user_surface *user_srf =
-	    container_of(base, struct vmw_user_surface, base);
-	struct vmw_resource *res = &user_srf->srf.res;
-
-	*p_base = NULL;
-	vmw_resource_unreference(&res);
-}
-
-int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv)
-{
-	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
-	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-
-	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
-}
-
-int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv)
-{
-	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_user_surface *user_srf;
-	struct vmw_surface *srf;
-	struct vmw_resource *res;
-	struct vmw_resource *tmp;
-	union drm_vmw_surface_create_arg *arg =
-	    (union drm_vmw_surface_create_arg *)data;
-	struct drm_vmw_surface_create_req *req = &arg->req;
-	struct drm_vmw_surface_arg *rep = &arg->rep;
-	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct drm_vmw_size __user *user_sizes;
-	int ret;
-	int i, j;
-	uint32_t cur_bo_offset;
-	struct drm_vmw_size *cur_size;
-	struct vmw_surface_offset *cur_offset;
-	uint32_t stride_bpp;
-	uint32_t bpp;
-	uint32_t num_sizes;
-	uint32_t size;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
-
-	if (unlikely(vmw_user_surface_size == 0))
-		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
-			128;
-
-	num_sizes = 0;
-	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
-		num_sizes += req->mip_levels[i];
-
-	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
-	    DRM_VMW_MAX_MIP_LEVELS)
-		return -EINVAL;
-
-	size = vmw_user_surface_size + 128 +
-		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
-		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
-
-
-	ret = ttm_read_lock(&vmaster->lock, true);
-	if (unlikely(ret != 0))
-		return ret;
-
-	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-				   size, false, true);
-	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Out of graphics memory for surface"
-				  " creation.\n");
-		goto out_unlock;
-	}
-
-	user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
-	if (unlikely(user_srf == NULL)) {
-		ret = -ENOMEM;
-		goto out_no_user_srf;
-	}
-
-	srf = &user_srf->srf;
-	res = &srf->res;
-
-	srf->flags = req->flags;
-	srf->format = req->format;
-	srf->scanout = req->scanout;
-	srf->backup = NULL;
-
-	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
-	srf->num_sizes = num_sizes;
-	user_srf->size = size;
-
-	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
-	if (unlikely(srf->sizes == NULL)) {
-		ret = -ENOMEM;
-		goto out_no_sizes;
-	}
-	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
-			       GFP_KERNEL);
-	if (unlikely(srf->sizes == NULL)) {
-		ret = -ENOMEM;
-		goto out_no_offsets;
-	}
-
-	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
-	    req->size_addr;
-
-	ret = copy_from_user(srf->sizes, user_sizes,
-			     srf->num_sizes * sizeof(*srf->sizes));
-	if (unlikely(ret != 0)) {
-		ret = -EFAULT;
-		goto out_no_copy;
-	}
-
-	cur_bo_offset = 0;
-	cur_offset = srf->offsets;
-	cur_size = srf->sizes;
-
-	bpp = vmw_sf_bpp[srf->format].bpp;
-	stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
-
-	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
-		for (j = 0; j < srf->mip_levels[i]; ++j) {
-			uint32_t stride =
-				(cur_size->width * stride_bpp + 7) >> 3;
-
-			cur_offset->face = i;
-			cur_offset->mip = j;
-			cur_offset->bo_offset = cur_bo_offset;
-			cur_bo_offset += stride * cur_size->height *
-				cur_size->depth * bpp / stride_bpp;
-			++cur_offset;
-			++cur_size;
-		}
-	}
-	srf->backup_size = cur_bo_offset;
-
-	if (srf->scanout &&
-	    srf->num_sizes == 1 &&
-	    srf->sizes[0].width == 64 &&
-	    srf->sizes[0].height == 64 &&
-	    srf->format == SVGA3D_A8R8G8B8) {
-
-		/* allocate image area and clear it */
-		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
-		if (!srf->snooper.image) {
-			DRM_ERROR("Failed to allocate cursor_image\n");
-			ret = -ENOMEM;
-			goto out_no_copy;
-		}
-	} else {
-		srf->snooper.image = NULL;
-	}
-	srf->snooper.crtc = NULL;
-
-	user_srf->base.shareable = false;
-	user_srf->base.tfile = NULL;
-
-	/**
-	 * From this point, the generic resource management functions
-	 * destroy the object on failure.
-	 */
-
-	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
-	if (unlikely(ret != 0))
-		goto out_unlock;
-
-	tmp = vmw_resource_reference(&srf->res);
-	ret = ttm_base_object_init(tfile, &user_srf->base,
-				   req->shareable, VMW_RES_SURFACE,
-				   &vmw_user_surface_base_release, NULL);
-
-	if (unlikely(ret != 0)) {
-		vmw_resource_unreference(&tmp);
-		vmw_resource_unreference(&res);
-		goto out_unlock;
-	}
-
-	rep->sid = user_srf->base.hash.key;
-	if (rep->sid == SVGA3D_INVALID_ID)
-		DRM_ERROR("Created bad Surface ID.\n");
-
-	vmw_resource_unreference(&res);
-
-	ttm_read_unlock(&vmaster->lock);
-	return 0;
-out_no_copy:
-	kfree(srf->offsets);
-out_no_offsets:
-	kfree(srf->sizes);
-out_no_sizes:
-	kfree(user_srf);
-out_no_user_srf:
-	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-out_unlock:
-	ttm_read_unlock(&vmaster->lock);
-	return ret;
-}
-
-int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
-				struct drm_file *file_priv)
-{
-	union drm_vmw_surface_reference_arg *arg =
-	    (union drm_vmw_surface_reference_arg *)data;
-	struct drm_vmw_surface_arg *req = &arg->req;
-	struct drm_vmw_surface_create_req *rep = &arg->rep;
-	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct vmw_surface *srf;
-	struct vmw_user_surface *user_srf;
-	struct drm_vmw_size __user *user_sizes;
-	struct ttm_base_object *base;
-	int ret = -EINVAL;
-
-	base = ttm_base_object_lookup(tfile, req->sid);
-	if (unlikely(base == NULL)) {
-		DRM_ERROR("Could not find surface to reference.\n");
-		return -EINVAL;
-	}
-
-	if (unlikely(base->object_type != VMW_RES_SURFACE))
-		goto out_bad_resource;
-
-	user_srf = container_of(base, struct vmw_user_surface, base);
-	srf = &user_srf->srf;
-
-	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Could not add a reference to a surface.\n");
-		goto out_no_reference;
-	}
-
-	rep->flags = srf->flags;
-	rep->format = srf->format;
-	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
-	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
-	    rep->size_addr;
-
-	if (user_sizes)
-		ret = copy_to_user(user_sizes, srf->sizes,
-				   srf->num_sizes * sizeof(*srf->sizes));
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("copy_to_user failed %p %u\n",
-			  user_sizes, srf->num_sizes);
-		ret = -EFAULT;
-	}
-out_bad_resource:
-out_no_reference:
-	ttm_base_object_unref(&base);
-
-	return ret;
-}
-
-int vmw_surface_check(struct vmw_private *dev_priv,
-		      struct ttm_object_file *tfile,
-		      uint32_t handle, int *id)
-{
-	struct ttm_base_object *base;
-	struct vmw_user_surface *user_srf;
-
-	int ret = -EPERM;
-
-	base = ttm_base_object_lookup(tfile, handle);
-	if (unlikely(base == NULL))
-		return -EINVAL;
-
-	if (unlikely(base->object_type != VMW_RES_SURFACE))
-		goto out_bad_surface;
-
-	user_srf = container_of(base, struct vmw_user_surface, base);
-	*id = user_srf->srf.res.id;
-	ret = 0;
-
-out_bad_surface:
-	/**
-	 * FIXME: May deadlock here when called from the
-	 * command parsing code.
-	 */
-
-	ttm_base_object_unref(&base);
-	return ret;
-}
-
 /**
  * Buffer management.
  */
@@ -1562,11 +373,11 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
 	memset(vmw_bo, 0, sizeof(*vmw_bo));
 
-	INIT_LIST_HEAD(&vmw_bo->validate_list);
+	INIT_LIST_HEAD(&vmw_bo->res_list);
 
 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
 			  ttm_bo_type_device, placement,
-			  0, 0, interruptible,
+			  0, interruptible,
 			  NULL, acc_size, NULL, bo_free);
 	return ret;
 }
@@ -1575,7 +386,7 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 {
 	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
 
-	kfree(vmw_user_bo);
+	ttm_base_object_kfree(vmw_user_bo, base);
 }
 
 static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
@@ -1594,6 +405,79 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
 	ttm_bo_unref(&bo);
 }
 
+/**
+ * vmw_user_dmabuf_alloc - Allocate a user dma buffer
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @tfile: Pointer to a struct ttm_object_file on which to register the user
+ * object.
+ * @size: Size of the dma buffer.
+ * @shareable: Boolean whether the buffer is shareable with other open files.
+ * @handle: Pointer to where the handle value should be assigned.
+ * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
+ * should be assigned.
+ */
+int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+			  struct ttm_object_file *tfile,
+			  uint32_t size,
+			  bool shareable,
+			  uint32_t *handle,
+			  struct vmw_dma_buffer **p_dma_buf)
+{
+	struct vmw_user_dma_buffer *user_bo;
+	struct ttm_buffer_object *tmp;
+	int ret;
+
+	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+	if (unlikely(user_bo == NULL)) {
+		DRM_ERROR("Failed to allocate a buffer.\n");
+		return -ENOMEM;
+	}
+
+	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+			      &vmw_vram_sys_placement, true,
+			      &vmw_user_dmabuf_destroy);
+	if (unlikely(ret != 0))
+		return ret;
+
+	tmp = ttm_bo_reference(&user_bo->dma.base);
+	ret = ttm_base_object_init(tfile,
+				   &user_bo->base,
+				   shareable,
+				   ttm_buffer_type,
+				   &vmw_user_dmabuf_release, NULL);
+	if (unlikely(ret != 0)) {
+		ttm_bo_unref(&tmp);
+		goto out_no_base_object;
+	}
+
+	*p_dma_buf = &user_bo->dma;
+	*handle = user_bo->base.hash.key;
+
+out_no_base_object:
+	return ret;
+}
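vmw_user_dmabuf_alloc() hands back two references: one held by the new user-space handle and one through *p_dma_buf. Callers that only need the handle drop the pointer reference, as the reworked vmw_dmabuf_alloc_ioctl() below does:

ret = vmw_user_dmabuf_alloc(dev_priv, tfile, size, false,
			    &handle, &dma_buf);
if (unlikely(ret != 0))
	return ret;

/* The handle keeps its own reference; drop the local one. */
vmw_dmabuf_unreference(&dma_buf);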
+
+/**
+ * vmw_user_dmabuf_verify_access - verify access permissions on this
+ * buffer object.
+ *
+ * @bo: Pointer to the buffer object being accessed
+ * @tfile: Identifying the caller.
+ */
+int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+				  struct ttm_object_file *tfile)
+{
+	struct vmw_user_dma_buffer *vmw_user_bo;
+
+	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
+		return -EPERM;
+
+	vmw_user_bo = vmw_user_dma_buffer(bo);
+	return (vmw_user_bo->base.tfile == tfile ||
+		vmw_user_bo->base.shareable) ? 0 : -EPERM;
+}
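The rule reduces to "same client, or buffer explicitly marked shareable":

/*
 * base.tfile == tfile   base.shareable   result
 *        yes                  any           0
 *        no                   yes           0
 *        no                   no         -EPERM
 */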
+
 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)
 {
@@ -1602,44 +486,27 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 	    (union drm_vmw_alloc_dmabuf_arg *)data;
 	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
 	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
-	struct vmw_user_dma_buffer *vmw_user_bo;
-	struct ttm_buffer_object *tmp;
+	struct vmw_dma_buffer *dma_buf;
+	uint32_t handle;
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
-	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
-	if (unlikely(vmw_user_bo == NULL))
-		return -ENOMEM;
-
 	ret = ttm_read_lock(&vmaster->lock, true);
-	if (unlikely(ret != 0)) {
-		kfree(vmw_user_bo);
+	if (unlikely(ret != 0))
 		return ret;
-	}
 
-	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
-			      &vmw_vram_sys_placement, true,
-			      &vmw_user_dmabuf_destroy);
+	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+				    req->size, false, &handle, &dma_buf);
 	if (unlikely(ret != 0))
 		goto out_no_dmabuf;
 
-	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
-	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
-				   &vmw_user_bo->base,
-				   false,
-				   ttm_buffer_type,
-				   &vmw_user_dmabuf_release, NULL);
-	if (unlikely(ret != 0))
-		goto out_no_base_object;
-	else {
-		rep->handle = vmw_user_bo->base.hash.key;
-		rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
-		rep->cur_gmr_id = vmw_user_bo->base.hash.key;
-		rep->cur_gmr_offset = 0;
-	}
+	rep->handle = handle;
+	rep->map_handle = dma_buf->base.addr_space_offset;
+	rep->cur_gmr_id = handle;
+	rep->cur_gmr_offset = 0;
+
+	vmw_dmabuf_unreference(&dma_buf);
 
-out_no_base_object:
-	ttm_bo_unref(&tmp);
 out_no_dmabuf:
 	ttm_read_unlock(&vmaster->lock);
 
@@ -1657,27 +524,6 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
 					 TTM_REF_USAGE);
 }
 
-uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
-				  uint32_t cur_validate_node)
-{
-	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
-	if (likely(vmw_bo->on_validate_list))
-		return vmw_bo->cur_validate_node;
-
-	vmw_bo->cur_validate_node = cur_validate_node;
-	vmw_bo->on_validate_list = true;
-
-	return cur_validate_node;
-}
-
-void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
-{
-	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
-	vmw_bo->on_validate_list = false;
-}
-
 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 			   uint32_t handle, struct vmw_dma_buffer **out)
 {
@@ -1706,6 +552,18 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 	return 0;
 }
 
+int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+			      struct vmw_dma_buffer *dma_buf)
+{
+	struct vmw_user_dma_buffer *user_bo;
+
+	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
+		return -EINVAL;
+
+	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+	return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+}
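A sketch of handing an existing buffer to the calling client; this only works for buffers created through vmw_user_dmabuf_alloc(), since anything else fails the destroy-callback check with -EINVAL:

ret = vmw_user_dmabuf_reference(vmw_fpriv(file_priv)->tfile, dma_buf);
if (unlikely(ret != 0))
	DRM_ERROR("Could not add a reference to a buffer.\n");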
+
 /*
  * Stream management
  */
@@ -1730,8 +588,8 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
 	struct vmw_resource *res = &stream->res;
 	int ret;
 
-	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
-				VMW_RES_STREAM, false, res_free, NULL);
+	ret = vmw_resource_init(dev_priv, res, false, res_free,
+				&vmw_stream_func);
 
 	if (unlikely(ret != 0)) {
 		if (res_free == NULL)
@@ -1753,17 +611,13 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
 	return 0;
 }
 
-/**
- * User-space context management:
- */
-
 static void vmw_user_stream_free(struct vmw_resource *res)
 {
 	struct vmw_user_stream *stream =
 	    container_of(res, struct vmw_user_stream, stream.res);
 	struct vmw_private *dev_priv = res->dev_priv;
 
-	kfree(stream);
+	ttm_base_object_kfree(stream, base);
 	ttm_mem_global_free(vmw_mem_glob(dev_priv),
 			    vmw_user_stream_size);
 }
@@ -1792,9 +646,11 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
 	struct vmw_user_stream *stream;
 	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
 	int ret = 0;
 
-	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
+	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
 	if (unlikely(res == NULL))
 		return -EINVAL;
 
@@ -1895,7 +751,8 @@ int vmw_user_stream_lookup(struct vmw_private *dev_priv,
 	struct vmw_resource *res;
 	int ret;
 
-	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
+	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
+				  *inout_id);
 	if (unlikely(res == NULL))
 		return -EINVAL;
 
@@ -1990,3 +847,453 @@ int vmw_dumb_destroy(struct drm_file *file_priv,
 	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
 					 handle, TTM_REF_USAGE);
 }
+
+/**
+ * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
+ *
+ * @res:            The resource for which to allocate a backup buffer.
+ * @interruptible:  Whether any sleeps during allocation should be
+ *                  performed while interruptible.
+ */
+static int vmw_resource_buf_alloc(struct vmw_resource *res,
+				  bool interruptible)
+{
+	unsigned long size =
+		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+	struct vmw_dma_buffer *backup;
+	int ret;
+
+	if (likely(res->backup)) {
+		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
+		return 0;
+	}
+
+	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
+	if (unlikely(backup == NULL))
+		return -ENOMEM;
+
+	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
+			      res->func->backup_placement,
+			      interruptible,
+			      &vmw_dmabuf_bo_free);
+	if (unlikely(ret != 0))
+		goto out_no_dmabuf;
+
+	res->backup = backup;
+
+out_no_dmabuf:
+	return ret;
+}
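The size computation is ordinary page alignment; worked numbers, assuming 4 KiB pages:

/*
 * backup_size = 5000: (5000 + 4095) & ~4095 = 8192  (two pages)
 * backup_size = 4096: (4096 + 4095) & ~4095 = 4096  (already aligned)
 */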
+
+/**
+ * vmw_resource_do_validate - Make a resource up-to-date and visible
+ *                            to the device.
+ *
+ * @res:            The resource to make visible to the device.
+ * @val_buf:        Information about a buffer possibly
+ *                  containing backup data if a bind operation is needed.
+ *
+ * On hardware resource shortage, this function returns -EBUSY and
+ * the call should be retried once resources have been freed up.
+ */
+static int vmw_resource_do_validate(struct vmw_resource *res,
+				    struct ttm_validate_buffer *val_buf)
+{
+	int ret = 0;
+	const struct vmw_res_func *func = res->func;
+
+	if (unlikely(res->id == -1)) {
+		ret = func->create(res);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	if (func->bind &&
+	    ((func->needs_backup && list_empty(&res->mob_head) &&
+	      val_buf->bo != NULL) ||
+	     (!func->needs_backup && val_buf->bo != NULL))) {
+		ret = func->bind(res, val_buf);
+		if (unlikely(ret != 0))
+			goto out_bind_failed;
+		if (func->needs_backup)
+			list_add_tail(&res->mob_head, &res->backup->res_list);
+	}
+
+	/*
+	 * Only do this on write operations, and move to
+	 * vmw_resource_unreserve if it can be called after
+	 * backup buffers have been unreserved. Otherwise
+	 * sort out locking.
+	 */
+	res->res_dirty = true;
+
+	return 0;
+
+out_bind_failed:
+	func->destroy(res);
+
+	return ret;
+}
+
+/**
+ * vmw_resource_unreserve - Unreserve a resource previously reserved for
+ * command submission.
+ *
+ * @res:               Pointer to the struct vmw_resource to unreserve.
+ * @new_backup:        Pointer to new backup buffer if command submission
+ *                     switched.
+ * @new_backup_offset: New backup offset if @new_backup is !NULL.
+ *
+ * Currently unreserving a resource means putting it back on the device's
+ * resource lru list, so that it can be evicted if necessary.
+ */
+void vmw_resource_unreserve(struct vmw_resource *res,
+			    struct vmw_dma_buffer *new_backup,
+			    unsigned long new_backup_offset)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	if (!list_empty(&res->lru_head))
+		return;
+
+	if (new_backup && new_backup != res->backup) {
+
+		if (res->backup) {
+			BUG_ON(atomic_read(&res->backup->base.reserved) == 0);
+			list_del_init(&res->mob_head);
+			vmw_dmabuf_unreference(&res->backup);
+		}
+
+		res->backup = vmw_dmabuf_reference(new_backup);
+		BUG_ON(atomic_read(&new_backup->base.reserved) == 0);
+		list_add_tail(&res->mob_head, &new_backup->res_list);
+	}
+	if (new_backup)
+		res->backup_offset = new_backup_offset;
+
+	if (!res->func->may_evict)
+		return;
+
+	write_lock(&dev_priv->resource_lock);
+	list_add_tail(&res->lru_head,
+		      &res->dev_priv->res_lru[res->func->res_type]);
+	write_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_check_buffer - Check whether a backup buffer is needed
+ *                             for a resource and in that case, allocate
+ *                             one, reserve and validate it.
+ *
+ * @res:            The resource for which to allocate a backup buffer.
+ * @interruptible:  Whether any sleeps during allocation should be
+ *                  performed while interruptible.
+ * @val_buf:        On successful return contains data about the
+ *                  reserved and validated backup buffer.
+ */
+int vmw_resource_check_buffer(struct vmw_resource *res,
+			      bool interruptible,
+			      struct ttm_validate_buffer *val_buf)
+{
+	struct list_head val_list;
+	bool backup_dirty = false;
+	int ret;
+
+	if (unlikely(res->backup == NULL)) {
+		ret = vmw_resource_buf_alloc(res, interruptible);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	INIT_LIST_HEAD(&val_list);
+	val_buf->bo = ttm_bo_reference(&res->backup->base);
+	list_add_tail(&val_buf->head, &val_list);
+	ret = ttm_eu_reserve_buffers(&val_list);
+	if (unlikely(ret != 0))
+		goto out_no_reserve;
+
+	if (res->func->needs_backup && list_empty(&res->mob_head))
+		return 0;
+
+	backup_dirty = res->backup_dirty;
+	ret = ttm_bo_validate(&res->backup->base,
+			      res->func->backup_placement,
+			      true, false);
+
+	if (unlikely(ret != 0))
+		goto out_no_validate;
+
+	return 0;
+
+out_no_validate:
+	ttm_eu_backoff_reservation(&val_list);
+out_no_reserve:
+	ttm_bo_unref(&val_buf->bo);
+	if (backup_dirty)
+		vmw_dmabuf_unreference(&res->backup);
+
+	return ret;
+}
+
+/**
+ * vmw_resource_reserve - Reserve a resource for command submission
+ *
+ * @res:            The resource to reserve.
+ *
+ * This function takes the resource off the LRU list and makes sure
+ * a backup buffer is present for guest-backed resources. However,
+ * the buffer may not be bound to the resource at this point.
+ *
+ */
+int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+
+	write_lock(&dev_priv->resource_lock);
+	list_del_init(&res->lru_head);
+	write_unlock(&dev_priv->resource_lock);
+
+	if (res->func->needs_backup && res->backup == NULL &&
+	    !no_backup) {
+		ret = vmw_resource_buf_alloc(res, true);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_resource_backoff_reservation - Unreserve and unreference a
+ *                                    backup buffer
+ *
+ * @val_buf:        Backup buffer information.
+ */
+void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
+{
+	struct list_head val_list;
+
+	if (likely(val_buf->bo == NULL))
+		return;
+
+	INIT_LIST_HEAD(&val_list);
+	list_add_tail(&val_buf->head, &val_list);
+	ttm_eu_backoff_reservation(&val_list);
+	ttm_bo_unref(&val_buf->bo);
+}
+
+/**
+ * vmw_resource_do_evict - Evict a resource, and transfer its data
+ *                         to a backup buffer.
+ *
+ * @res:            The resource to evict.
+ */
+int vmw_resource_do_evict(struct vmw_resource *res)
+{
+	struct ttm_validate_buffer val_buf;
+	const struct vmw_res_func *func = res->func;
+	int ret;
+
+	BUG_ON(!func->may_evict);
+
+	val_buf.bo = NULL;
+	ret = vmw_resource_check_buffer(res, true, &val_buf);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(func->unbind != NULL &&
+		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
+		ret = func->unbind(res, res->res_dirty, &val_buf);
+		if (unlikely(ret != 0))
+			goto out_no_unbind;
+		list_del_init(&res->mob_head);
+	}
+	ret = func->destroy(res);
+	res->backup_dirty = true;
+	res->res_dirty = false;
+out_no_unbind:
+	vmw_resource_backoff_reservation(&val_buf);
+
+	return ret;
+}
+
+
+/**
+ * vmw_resource_validate - Make a resource up-to-date and visible
+ *                         to the device.
+ *
+ * @res:            The resource to make visible to the device.
+ *
+ * On successful return, any backup DMA buffer pointed to by @res->backup will
+ * be reserved and validated.
+ * On hardware resource shortage, this function will repeatedly evict
+ * resources of the same type until the validation succeeds.
+ */
+int vmw_resource_validate(struct vmw_resource *res)
+{
+	int ret;
+	struct vmw_resource *evict_res;
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
+	struct ttm_validate_buffer val_buf;
+
+	if (likely(!res->func->may_evict))
+		return 0;
+
+	val_buf.bo = NULL;
+	if (res->backup)
+		val_buf.bo = &res->backup->base;
+	do {
+		ret = vmw_resource_do_validate(res, &val_buf);
+		if (likely(ret != -EBUSY))
+			break;
+
+		write_lock(&dev_priv->resource_lock);
+		if (list_empty(lru_list) || !res->func->may_evict) {
+			DRM_ERROR("Out of device device id entries "
+				  "for %s.\n", res->func->type_name);
+			ret = -EBUSY;
+			write_unlock(&dev_priv->resource_lock);
+			break;
+		}
+
+		evict_res = vmw_resource_reference
+			(list_first_entry(lru_list, struct vmw_resource,
+					  lru_head));
+		list_del_init(&evict_res->lru_head);
+
+		write_unlock(&dev_priv->resource_lock);
+		vmw_resource_do_evict(evict_res);
+		vmw_resource_unreference(&evict_res);
+	} while (1);
+
+	if (unlikely(ret != 0))
+		goto out_no_validate;
+	else if (!res->func->needs_backup && res->backup) {
+		list_del_init(&res->mob_head);
+		vmw_dmabuf_unreference(&res->backup);
+	}
+
+	return 0;
+
+out_no_validate:
+	return ret;
+}
+
+/**
+ * vmw_fence_single_bo - Utility function to fence a single TTM buffer
+ *                       object without unreserving it.
+ *
+ * @bo:             Pointer to the struct ttm_buffer_object to fence.
+ * @fence:          Pointer to the fence. If NULL, this function will
+ *                  insert a fence into the command stream.
+ *
+ * Contrary to the ttm_eu version of this function, it takes only
+ * a single buffer object instead of a list, and it also doesn't
+ * unreserve the buffer object, which needs to be done separately.
+ */
+void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+			 struct vmw_fence_obj *fence)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_driver *driver = bdev->driver;
+	struct vmw_fence_obj *old_fence_obj;
+	struct vmw_private *dev_priv =
+		container_of(bdev, struct vmw_private, bdev);
+
+	if (fence == NULL)
+		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+	else
+		driver->sync_obj_ref(fence);
+
+	spin_lock(&bdev->fence_lock);
+
+	old_fence_obj = bo->sync_obj;
+	bo->sync_obj = fence;
+
+	spin_unlock(&bdev->fence_lock);
+
+	if (old_fence_obj)
+		vmw_fence_obj_unreference(&old_fence_obj);
+}
+
+/**
+ * vmw_resource_move_notify - TTM move_notify_callback
+ *
+ * @bo:             The TTM buffer object about to move.
+ * @mem:            The struct ttm_mem_reg indicating to what memory
+ *                  region the move is taking place.
+ *
+ * For now does nothing.
+ */
+void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+			      struct ttm_mem_reg *mem)
+{
+}
+
+/**
+ * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
+ *
+ * @res:            The resource being queried.
+ */
+bool vmw_resource_needs_backup(const struct vmw_resource *res)
+{
+	return res->func->needs_backup;
+}
+
+/**
+ * vmw_resource_evict_type - Evict all resources of a specific type
+ *
+ * @dev_priv:       Pointer to a device private struct
+ * @type:           The resource type to evict
+ *
+ * To avoid thrashing or starvation, or as part of the hibernation sequence,
+ * evict all evictable resources of a specific type.
+ */
+static void vmw_resource_evict_type(struct vmw_private *dev_priv,
+				    enum vmw_res_type type)
+{
+	struct list_head *lru_list = &dev_priv->res_lru[type];
+	struct vmw_resource *evict_res;
+
+	do {
+		write_lock(&dev_priv->resource_lock);
+
+		if (list_empty(lru_list))
+			goto out_unlock;
+
+		evict_res = vmw_resource_reference(
+			list_first_entry(lru_list, struct vmw_resource,
+					 lru_head));
+		list_del_init(&evict_res->lru_head);
+		write_unlock(&dev_priv->resource_lock);
+		vmw_resource_do_evict(evict_res);
+		vmw_resource_unreference(&evict_res);
+	} while (1);
+
+out_unlock:
+	write_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_evict_all - Evict all evictable resources
+ *
+ * @dev_priv:       Pointer to a device private struct
+ *
+ * To avoid thrashing or starvation, or as part of the hibernation sequence,
+ * evict all evictable resources. In particular this means that all
+ * guest-backed resources that are registered with the device are
+ * evicted and the OTable becomes clean.
+ */
+void vmw_resource_evict_all(struct vmw_private *dev_priv)
+{
+	enum vmw_res_type type;
+
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+
+	for (type = 0; type < vmw_res_max; ++type)
+		vmw_resource_evict_type(dev_priv, type);
+
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+}
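
The three entry points above (reserve, validate, unreserve) make up the new
resource lifecycle. A minimal caller sketch, assuming the usual command
submission context; this is illustrative, not code from the patch:

	static int example_prepare_resource(struct vmw_resource *res)
	{
		int ret;

		/* Take the resource off its LRU list and allocate a backup
		 * buffer if the resource type requires one. */
		ret = vmw_resource_reserve(res, false);
		if (ret != 0)
			return ret;

		/* Create and bind the hardware resource. On -EBUSY this
		 * evicts other resources of the same type and retries. */
		ret = vmw_resource_validate(res);

		/* Put the resource back on its type's eviction LRU list. */
		vmw_resource_unreserve(res, NULL, 0);
		return ret;
	}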
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
new file mode 100644
index 000000000000..f3adeed2854c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -0,0 +1,84 @@
+/**************************************************************************
+ *
+ * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _VMWGFX_RESOURCE_PRIV_H_
+#define _VMWGFX_RESOURCE_PRIV_H_
+
+#include "vmwgfx_drv.h"
+
+/**
+ * struct vmw_user_resource_conv - Identify a derived user-exported resource
+ * type and provide a function to convert its ttm_base_object pointer to
+ * a struct vmw_resource
+ */
+struct vmw_user_resource_conv {
+	enum ttm_object_type object_type;
+	struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base);
+	void (*res_free) (struct vmw_resource *res);
+};
+
+/**
+ * struct vmw_res_func - members and functions common for a resource type
+ *
+ * @res_type:          Enum that identifies the lru list to use for eviction.
+ * @needs_backup:      Whether the resource is guest-backed and needs
+ *                     persistent buffer storage.
+ * @type_name:         String that identifies the resource type.
+ * @backup_placement:  TTM placement for backup buffers.
+ * @may_evict:         Whether the resource may be evicted.
+ * @create:            Create a hardware resource.
+ * @destroy:           Destroy a hardware resource.
+ * @bind:              Bind a hardware resource to persistent buffer storage.
+ * @unbind:            Unbind a hardware resource from persistent
+ *                     buffer storage.
+ */
+
+struct vmw_res_func {
+	enum vmw_res_type res_type;
+	bool needs_backup;
+	const char *type_name;
+	struct ttm_placement *backup_placement;
+	bool may_evict;
+
+	int (*create) (struct vmw_resource *res);
+	int (*destroy) (struct vmw_resource *res);
+	int (*bind) (struct vmw_resource *res,
+		     struct ttm_validate_buffer *val_buf);
+	int (*unbind) (struct vmw_resource *res,
+		       bool readback,
+		       struct ttm_validate_buffer *val_buf);
+};
+
+int vmw_resource_alloc_id(struct vmw_resource *res);
+void vmw_resource_release_id(struct vmw_resource *res);
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+		      bool delay_id,
+		      void (*res_free) (struct vmw_resource *res),
+		      const struct vmw_res_func *func);
+void vmw_resource_activate(struct vmw_resource *res,
+			   void (*hw_destroy) (struct vmw_resource *));
+#endif
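
A hedged sketch of how a struct vmw_user_resource_conv is meant to be
consumed when translating a user-space handle into a resource reference;
the surrounding lookup flow is assumed, not taken from this patch:

	struct ttm_base_object *base;
	struct vmw_resource *res = NULL;

	/* 'conv' names the expected TTM object type and supplies the
	 * base-object-to-resource conversion. */
	base = ttm_base_object_lookup(tfile, handle);
	if (base != NULL && base->object_type == conv->object_type)
		res = vmw_resource_reference(conv->base_obj_to_res(base));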
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 6deaf2f8bab1..26387c3d5a21 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -468,7 +468,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
 
 	drm_mode_crtc_set_gamma_size(crtc, 256);
 
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				      dev->mode_config.dirty_info_property,
 				      1);
 
@@ -485,7 +485,7 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
 		return -EINVAL;
 	}
 
-	if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_SCREEN_OBJECT_2)) {
+	if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
 		DRM_INFO("Not using screen objects,"
 			 " missing cap SCREEN_OBJECT_2\n");
 		return -ENOSYS;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
new file mode 100644
index 000000000000..582814339748
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -0,0 +1,893 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include <ttm/ttm_placement.h>
+#include "svga3d_surfacedefs.h"
+
+/**
+ * struct vmw_user_surface - User-space visible surface resource
+ *
+ * @base:           The TTM base object handling user-space visibility.
+ * @srf:            The surface metadata.
+ * @size:           TTM accounting size for the surface.
+ */
+struct vmw_user_surface {
+	struct ttm_base_object base;
+	struct vmw_surface srf;
+	uint32_t size;
+	uint32_t backup_handle;
+};
+
+/**
+ * struct vmw_surface_offset - Backing store mip level offset info
+ *
+ * @face:           Surface face.
+ * @mip:            Mip level.
+ * @bo_offset:      Offset into backing store of this mip level.
+ *
+ */
+struct vmw_surface_offset {
+	uint32_t face;
+	uint32_t mip;
+	uint32_t bo_offset;
+};
+
+static void vmw_user_surface_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base);
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_create(struct vmw_resource *res);
+static int vmw_legacy_srf_destroy(struct vmw_resource *res);
+
+static const struct vmw_user_resource_conv user_surface_conv = {
+	.object_type = VMW_RES_SURFACE,
+	.base_obj_to_res = vmw_user_surface_base_to_res,
+	.res_free = vmw_user_surface_free
+};
+
+const struct vmw_user_resource_conv *user_surface_converter =
+	&user_surface_conv;
+
+
+static uint64_t vmw_user_surface_size;
+
+static const struct vmw_res_func vmw_legacy_surface_func = {
+	.res_type = vmw_res_surface,
+	.needs_backup = false,
+	.may_evict = true,
+	.type_name = "legacy surfaces",
+	.backup_placement = &vmw_srf_placement,
+	.create = &vmw_legacy_srf_create,
+	.destroy = &vmw_legacy_srf_destroy,
+	.bind = &vmw_legacy_srf_bind,
+	.unbind = &vmw_legacy_srf_unbind
+};
+
+/**
+ * struct vmw_surface_dma - SVGA3D DMA command
+ */
+struct vmw_surface_dma {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdSurfaceDMA body;
+	SVGA3dCopyBox cb;
+	SVGA3dCmdSurfaceDMASuffix suffix;
+};
+
+/**
+ * struct vmw_surface_define - SVGA3D Surface Define command
+ */
+struct vmw_surface_define {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdDefineSurface body;
+};
+
+/**
+ * struct vmw_surface_destroy - SVGA3D Surface Destroy command
+ */
+struct vmw_surface_destroy {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdDestroySurface body;
+};
+
+
+/**
+ * vmw_surface_dma_size - Compute fifo size for a dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface dma command for backup or
+ * restoration of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
+{
+	return srf->num_sizes * sizeof(struct vmw_surface_dma);
+}
+
+
+/**
+ * vmw_surface_define_size - Compute fifo size for a surface define command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface define command for the definition
+ * of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
+{
+	return sizeof(struct vmw_surface_define) + srf->num_sizes *
+		sizeof(SVGA3dSize);
+}
+
+
+/**
+ * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
+ *
+ * Computes the required size for a surface destroy command for the destruction
+ * of a hw surface.
+ */
+static inline uint32_t vmw_surface_destroy_size(void)
+{
+	return sizeof(struct vmw_surface_destroy);
+}
+
+/**
+ * vmw_surface_destroy_encode - Encode a surface_destroy command.
+ *
+ * @id: The surface id
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_destroy_encode(uint32_t id,
+				       void *cmd_space)
+{
+	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
+		cmd_space;
+
+	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.sid = id;
+}
+
+/**
+ * vmw_surface_define_encode - Encode a surface_define command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_define_encode(const struct vmw_surface *srf,
+				      void *cmd_space)
+{
+	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
+		cmd_space;
+	struct drm_vmw_size *src_size;
+	SVGA3dSize *cmd_size;
+	uint32_t cmd_len;
+	int i;
+
+	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+
+	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
+	cmd->header.size = cmd_len;
+	cmd->body.sid = srf->res.id;
+	cmd->body.surfaceFlags = srf->flags;
+	cmd->body.format = cpu_to_le32(srf->format);
+	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+		cmd->body.face[i].numMipLevels = srf->mip_levels[i];
+
+	cmd += 1;
+	cmd_size = (SVGA3dSize *) cmd;
+	src_size = srf->sizes;
+
+	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
+		cmd_size->width = src_size->width;
+		cmd_size->height = src_size->height;
+		cmd_size->depth = src_size->depth;
+	}
+}
+
+/**
+ * vmw_surface_dma_encode - Encode a surface_dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
+ * should be placed or read from.
+ * @to_surface: Boolean whether to DMA to the surface or from the surface.
+ */
+static void vmw_surface_dma_encode(struct vmw_surface *srf,
+				   void *cmd_space,
+				   const SVGAGuestPtr *ptr,
+				   bool to_surface)
+{
+	uint32_t i;
+	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
+	const struct svga3d_surface_desc *desc =
+		svga3dsurface_get_desc(srf->format);
+
+	for (i = 0; i < srf->num_sizes; ++i) {
+		SVGA3dCmdHeader *header = &cmd->header;
+		SVGA3dCmdSurfaceDMA *body = &cmd->body;
+		SVGA3dCopyBox *cb = &cmd->cb;
+		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
+		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
+		const struct drm_vmw_size *cur_size = &srf->sizes[i];
+
+		header->id = SVGA_3D_CMD_SURFACE_DMA;
+		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
+
+		body->guest.ptr = *ptr;
+		body->guest.ptr.offset += cur_offset->bo_offset;
+		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
+								  cur_size);
+		body->host.sid = srf->res.id;
+		body->host.face = cur_offset->face;
+		body->host.mipmap = cur_offset->mip;
+		body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
+				  SVGA3D_READ_HOST_VRAM);
+		cb->x = 0;
+		cb->y = 0;
+		cb->z = 0;
+		cb->srcx = 0;
+		cb->srcy = 0;
+		cb->srcz = 0;
+		cb->w = cur_size->width;
+		cb->h = cur_size->height;
+		cb->d = cur_size->depth;
+
+		suffix->suffixSize = sizeof(*suffix);
+		suffix->maximumOffset =
+			svga3dsurface_get_image_buffer_size(desc, cur_size,
+							    body->guest.pitch);
+		suffix->flags.discard = 0;
+		suffix->flags.unsynchronized = 0;
+		suffix->flags.reserved = 0;
+		++cmd;
+	}
+}
+
+
+/**
+ * vmw_hw_surface_destroy - destroy a Device surface
+ *
+ * @res:        Pointer to a struct vmw_resource embedded in a struct
+ *              vmw_surface.
+ *
+ * Destroys the device surface associated with a struct vmw_surface, if
+ * any, and adjusts accounting and resource count accordingly.
+ */
+static void vmw_hw_surface_destroy(struct vmw_resource *res)
+{
+
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_surface *srf;
+	void *cmd;
+
+	if (res->id != -1) {
+
+		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
+		if (unlikely(cmd == NULL)) {
+			DRM_ERROR("Failed reserving FIFO space for surface "
+				  "destruction.\n");
+			return;
+		}
+
+		vmw_surface_destroy_encode(res->id, cmd);
+		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
+
+		/*
+		 * used_memory_size_atomic, or separate lock
+		 * to avoid taking dev_priv::cmdbuf_mutex in
+		 * the destroy path.
+		 */
+
+		mutex_lock(&dev_priv->cmdbuf_mutex);
+		srf = vmw_res_to_srf(res);
+		dev_priv->used_memory_size -= res->backup_size;
+		mutex_unlock(&dev_priv->cmdbuf_mutex);
+	}
+	vmw_3d_resource_dec(dev_priv, false);
+}
+
+/**
+ * vmw_legacy_srf_create - Create a device surface as part of the
+ * resource validation process.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
+ *
+ * If the surface doesn't have a hw id, allocate one and define the surface.
+ *
+ * Returns -EBUSY if there weren't sufficient device resources to
+ * complete the validation. Retry after freeing up resources.
+ *
+ * May return other errors if the kernel is out of guest resources.
+ */
+static int vmw_legacy_srf_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_surface *srf;
+	uint32_t submit_size;
+	uint8_t *cmd;
+	int ret;
+
+	if (likely(res->id != -1))
+		return 0;
+
+	srf = vmw_res_to_srf(res);
+	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
+		     dev_priv->memory_size))
+		return -EBUSY;
+
+	/*
+	 * Alloc id for the resource.
+	 */
+
+	ret = vmw_resource_alloc_id(res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a surface id.\n");
+		goto out_no_id;
+	}
+
+	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
+		ret = -EBUSY;
+		goto out_no_fifo;
+	}
+
+	/*
+	 * Encode the surface define command.
+	 */
+
+	submit_size = vmw_surface_define_size(srf);
+	cmd = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "creation.\n");
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	vmw_surface_define_encode(srf, cmd);
+	vmw_fifo_commit(dev_priv, submit_size);
+	/*
+	 * Surface memory usage accounting.
+	 */
+
+	dev_priv->used_memory_size += res->backup_size;
+	return 0;
+
+out_no_fifo:
+	vmw_resource_release_id(res);
+out_no_id:
+	return ret;
+}
+
+/**
+ * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
+ *
+ * @res:            Pointer to a struct vmw_resource embedded in a struct
+ *                  vmw_surface.
+ * @val_buf:        Pointer to a struct ttm_validate_buffer containing
+ *                  information about the backup buffer.
+ * @bind:           Boolean whether to DMA to the surface.
+ *
+ * Transfer backup data to or from a legacy surface as part of the
+ * validation process.
+ * May return other errors if the kernel is out of guest resources.
+ * The backup buffer will be fenced or idle upon successful completion,
+ * and if the surface needs persistent backup storage, the backup buffer
+ * will also be returned reserved iff @bind is true.
+ */
+static int vmw_legacy_srf_dma(struct vmw_resource *res,
+			      struct ttm_validate_buffer *val_buf,
+			      bool bind)
+{
+	SVGAGuestPtr ptr;
+	struct vmw_fence_obj *fence;
+	uint32_t submit_size;
+	struct vmw_surface *srf = vmw_res_to_srf(res);
+	uint8_t *cmd;
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	BUG_ON(val_buf->bo == NULL);
+
+	submit_size = vmw_surface_dma_size(srf);
+	cmd = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "DMA.\n");
+		return -ENOMEM;
+	}
+	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
+	vmw_surface_dma_encode(srf, cmd, &ptr, bind);
+
+	vmw_fifo_commit(dev_priv, submit_size);
+
+	/*
+	 * Create a fence object and fence the backup buffer.
+	 */
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+
+	vmw_fence_single_bo(val_buf->bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
+ *                       surface validation process.
+ *
+ * @res:            Pointer to a struct vmw_resource embedded in a struct
+ *                  vmw_surface.
+ * @val_buf:        Pointer to a struct ttm_validate_buffer containing
+ *                  information about the backup buffer.
+ *
+ * This function will copy backup data to the surface if the
+ * backup buffer is dirty.
+ */
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf)
+{
+	if (!res->backup_dirty)
+		return 0;
+
+	return vmw_legacy_srf_dma(res, val_buf, true);
+}
+
+
+/**
+ * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
+ *                         surface eviction process.
+ *
+ * @res:            Pointer to a struct vmw_resource embedded in a struct
+ *                  vmw_surface.
+ * @val_buf:        Pointer to a struct ttm_validate_buffer containing
+ *                  information about the backup buffer.
+ *
+ * This function will copy backup data from the surface.
+ */
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf)
+{
+	if (unlikely(readback))
+		return vmw_legacy_srf_dma(res, val_buf, false);
+	return 0;
+}
+
+/**
+ * vmw_legacy_srf_destroy - Destroy a device surface as part of a
+ *                          resource eviction process.
+ *
+ * @res:            Pointer to a struct vmw_resource embedded in a struct
+ *                  vmw_surface.
+ */
+static int vmw_legacy_srf_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	uint32_t submit_size;
+	uint8_t *cmd;
+
+	BUG_ON(res->id == -1);
+
+	/*
+	 * Encode the surface destroy command.
+	 */
+
+	submit_size = vmw_surface_destroy_size();
+	cmd = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "eviction.\n");
+		return -ENOMEM;
+	}
+
+	vmw_surface_destroy_encode(res->id, cmd);
+	vmw_fifo_commit(dev_priv, submit_size);
+
+	/*
+	 * Surface memory usage accounting.
+	 */
+
+	dev_priv->used_memory_size -= res->backup_size;
+
+	/*
+	 * Release the surface ID.
+	 */
+
+	vmw_resource_release_id(res);
+
+	return 0;
+}
+
+
+/**
+ * vmw_surface_init - initialize a struct vmw_surface
+ *
+ * @dev_priv:       Pointer to a device private struct.
+ * @srf:            Pointer to the struct vmw_surface to initialize.
+ * @res_free:       Pointer to a resource destructor used to free
+ *                  the object.
+ */
+static int vmw_surface_init(struct vmw_private *dev_priv,
+			    struct vmw_surface *srf,
+			    void (*res_free) (struct vmw_resource *res))
+{
+	int ret;
+	struct vmw_resource *res = &srf->res;
+
+	BUG_ON(res_free == NULL);
+	(void) vmw_3d_resource_inc(dev_priv, false);
+	ret = vmw_resource_init(dev_priv, res, true, res_free,
+				&vmw_legacy_surface_func);
+
+	if (unlikely(ret != 0)) {
+		vmw_3d_resource_dec(dev_priv, false);
+		res_free(res);
+		return ret;
+	}
+
+	/*
+	 * The surface won't be visible to hardware until a
+	 * surface validate.
+	 */
+
+	vmw_resource_activate(res, vmw_hw_surface_destroy);
+	return ret;
+}
+
+/**
+ * vmw_user_surface_base_to_res - TTM base object to resource converter for
+ *                                user visible surfaces
+ *
+ * @base:           Pointer to a TTM base object
+ *
+ * Returns the struct vmw_resource embedded in a struct vmw_surface
+ * for the user-visible object identified by the TTM base object @base.
+ */
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base)
+{
+	return &(container_of(base, struct vmw_user_surface, base)->srf.res);
+}
+
+/**
+ * vmw_user_surface_free - User visible surface resource destructor
+ *
+ * @res:            A struct vmw_resource embedded in a struct vmw_surface.
+ */
+static void vmw_user_surface_free(struct vmw_resource *res)
+{
+	struct vmw_surface *srf = vmw_res_to_srf(res);
+	struct vmw_user_surface *user_srf =
+	    container_of(srf, struct vmw_user_surface, srf);
+	struct vmw_private *dev_priv = srf->res.dev_priv;
+	uint32_t size = user_srf->size;
+
+	kfree(srf->offsets);
+	kfree(srf->sizes);
+	kfree(srf->snooper.image);
+	ttm_base_object_kfree(user_srf, base);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_user_surface_base_release - User visible surface TTM base object destructor
+ *
+ * @p_base:         Pointer to a pointer to a TTM base object
+ *                  embedded in a struct vmw_user_surface.
+ *
+ * Drops the base object's reference to its resource, and sets the
+ * pointer pointed to by *p_base to NULL.
+ */
+static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct vmw_user_surface *user_srf =
+	    container_of(base, struct vmw_user_surface, base);
+	struct vmw_resource *res = &user_srf->srf.res;
+
+	*p_base = NULL;
+	vmw_resource_unreference(&res);
+}
+
+/**
+ * vmw_surface_destroy_ioctl - Ioctl function implementing
+ *                                  the user surface destroy functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
+}
+
+/**
+ * vmw_surface_define_ioctl - Ioctl function implementing
+ *                                  the user surface define functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_user_surface *user_srf;
+	struct vmw_surface *srf;
+	struct vmw_resource *res;
+	struct vmw_resource *tmp;
+	union drm_vmw_surface_create_arg *arg =
+	    (union drm_vmw_surface_create_arg *)data;
+	struct drm_vmw_surface_create_req *req = &arg->req;
+	struct drm_vmw_surface_arg *rep = &arg->rep;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct drm_vmw_size __user *user_sizes;
+	int ret;
+	int i, j;
+	uint32_t cur_bo_offset;
+	struct drm_vmw_size *cur_size;
+	struct vmw_surface_offset *cur_offset;
+	uint32_t num_sizes;
+	uint32_t size;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	const struct svga3d_surface_desc *desc;
+
+	if (unlikely(vmw_user_surface_size == 0))
+		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+			128;
+
+	num_sizes = 0;
+	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+		num_sizes += req->mip_levels[i];
+
+	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
+	    DRM_VMW_MAX_MIP_LEVELS)
+		return -EINVAL;
+
+	size = vmw_user_surface_size + 128 +
+		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
+		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
+
+
+	desc = svga3dsurface_get_desc(req->format);
+	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+		DRM_ERROR("Invalid surface format for surface creation.\n");
+		return -EINVAL;
+	}
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   size, false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for surface"
+				  " creation.\n");
+		goto out_unlock;
+	}
+
+	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+	if (unlikely(user_srf == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_user_srf;
+	}
+
+	srf = &user_srf->srf;
+	res = &srf->res;
+
+	srf->flags = req->flags;
+	srf->format = req->format;
+	srf->scanout = req->scanout;
+
+	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
+	srf->num_sizes = num_sizes;
+	user_srf->size = size;
+
+	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
+	if (unlikely(srf->sizes == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_sizes;
+	}
+	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
+			       GFP_KERNEL);
+	if (unlikely(srf->offsets == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_offsets;
+	}
+
+	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+	    req->size_addr;
+
+	ret = copy_from_user(srf->sizes, user_sizes,
+			     srf->num_sizes * sizeof(*srf->sizes));
+	if (unlikely(ret != 0)) {
+		ret = -EFAULT;
+		goto out_no_copy;
+	}
+
+	srf->base_size = *srf->sizes;
+	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+	srf->multisample_count = 1;
+
+	cur_bo_offset = 0;
+	cur_offset = srf->offsets;
+	cur_size = srf->sizes;
+
+	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+		for (j = 0; j < srf->mip_levels[i]; ++j) {
+			uint32_t stride = svga3dsurface_calculate_pitch
+				(desc, cur_size);
+
+			cur_offset->face = i;
+			cur_offset->mip = j;
+			cur_offset->bo_offset = cur_bo_offset;
+			cur_bo_offset += svga3dsurface_get_image_buffer_size
+				(desc, cur_size, stride);
+			++cur_offset;
+			++cur_size;
+		}
+	}
+	res->backup_size = cur_bo_offset;
+	if (srf->scanout &&
+	    srf->num_sizes == 1 &&
+	    srf->sizes[0].width == 64 &&
+	    srf->sizes[0].height == 64 &&
+	    srf->format == SVGA3D_A8R8G8B8) {
+
+		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
+		/* clear the image */
+		if (srf->snooper.image) {
+			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
+		} else {
+			DRM_ERROR("Failed to allocate cursor_image\n");
+			ret = -ENOMEM;
+			goto out_no_copy;
+		}
+	} else {
+		srf->snooper.image = NULL;
+	}
+	srf->snooper.crtc = NULL;
+
+	user_srf->base.shareable = false;
+	user_srf->base.tfile = NULL;
+
+	/*
+	 * From this point, the generic resource management functions
+	 * destroy the object on failure.
+	 */
+
+	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+	if (unlikely(ret != 0))
+		goto out_unlock;
+
+	tmp = vmw_resource_reference(&srf->res);
+	ret = ttm_base_object_init(tfile, &user_srf->base,
+				   req->shareable, VMW_RES_SURFACE,
+				   &vmw_user_surface_base_release, NULL);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&tmp);
+		vmw_resource_unreference(&res);
+		goto out_unlock;
+	}
+
+	rep->sid = user_srf->base.hash.key;
+	vmw_resource_unreference(&res);
+
+	ttm_read_unlock(&vmaster->lock);
+	return 0;
+out_no_copy:
+	kfree(srf->offsets);
+out_no_offsets:
+	kfree(srf->sizes);
+out_no_sizes:
+	ttm_base_object_kfree(user_srf, base);
+out_no_user_srf:
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+	ttm_read_unlock(&vmaster->lock);
+	return ret;
+}
+
+/**
+ * vmw_surface_reference_ioctl - Ioctl function implementing
+ *                                  the user surface reference functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	union drm_vmw_surface_reference_arg *arg =
+	    (union drm_vmw_surface_reference_arg *)data;
+	struct drm_vmw_surface_arg *req = &arg->req;
+	struct drm_vmw_surface_create_req *rep = &arg->rep;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_surface *srf;
+	struct vmw_user_surface *user_srf;
+	struct drm_vmw_size __user *user_sizes;
+	struct ttm_base_object *base;
+	int ret = -EINVAL;
+
+	base = ttm_base_object_lookup(tfile, req->sid);
+	if (unlikely(base == NULL)) {
+		DRM_ERROR("Could not find surface to reference.\n");
+		return -EINVAL;
+	}
+
+	if (unlikely(base->object_type != VMW_RES_SURFACE))
+		goto out_bad_resource;
+
+	user_srf = container_of(base, struct vmw_user_surface, base);
+	srf = &user_srf->srf;
+
+	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not add a reference to a surface.\n");
+		goto out_no_reference;
+	}
+
+	rep->flags = srf->flags;
+	rep->format = srf->format;
+	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
+	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+	    rep->size_addr;
+
+	if (user_sizes)
+		ret = copy_to_user(user_sizes, srf->sizes,
+				   srf->num_sizes * sizeof(*srf->sizes));
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("copy_to_user failed %p %u\n",
+			  user_sizes, srf->num_sizes);
+		ret = -EFAULT;
+	}
+out_bad_resource:
+out_no_reference:
+	ttm_base_object_unref(&base);
+
+	return ret;
+}
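
For illustration, the DMA helpers above compose as follows for a surface
readback (surface to guest memory). Error handling and the origin of 'ptr'
(an SVGAGuestPtr for the backup buffer, see vmw_bo_get_guest_ptr()) are
elided:

	uint32_t submit_size = vmw_surface_dma_size(srf);
	uint8_t *cmd = vmw_fifo_reserve(dev_priv, submit_size);

	if (likely(cmd != NULL)) {
		/* to_surface == false encodes a readback. */
		vmw_surface_dma_encode(srf, cmd, &ptr, false);
		vmw_fifo_commit(dev_priv, submit_size);
	}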
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index e25cf31faab2..fa60add0ff63 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -18,7 +18,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/dmi.h>
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
 #include <linux/fs.h>
@@ -376,7 +375,6 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
 			     size_t cnt, loff_t *ppos)
 {
 	char usercmd[64];
-	const char *pdev_name;
 	int ret;
 	bool delay = false, can_switch;
 	bool just_mux = false;
@@ -468,7 +466,6 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
 		goto out;
 
 	if (can_switch) {
-		pdev_name = pci_name(client->pdev);
 		ret = vga_switchto_stage1(client);
 		if (ret)
 			printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret);
@@ -540,7 +537,6 @@ fail:
 int vga_switcheroo_process_delayed_switch(void)
 {
 	struct vga_switcheroo_client *client;
-	const char *pdev_name;
 	int ret;
 	int err = -EINVAL;
 
@@ -555,7 +551,6 @@ int vga_switcheroo_process_delayed_switch(void)
 	if (!client || !check_can_switch())
 		goto err;
 
-	pdev_name = pci_name(client->pdev);
 	ret = vga_switchto_stage2(client);
 	if (ret)
 		printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret);
@@ -567,4 +562,3 @@ err:
 	return err;
 }
 EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
-
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 3fd82809b2d4..fad21c927a38 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1431,6 +1431,8 @@ extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
 				     struct timeval *vblanktime);
+extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
+				     struct drm_pending_vblank_event *e);
 extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
@@ -1503,6 +1505,7 @@ extern unsigned int drm_debug;
 
 extern unsigned int drm_vblank_offdelay;
 extern unsigned int drm_timestamp_precision;
+extern unsigned int drm_timestamp_monotonic;
 
 extern struct class *drm_class;
 extern struct proc_dir_entry *drm_proc_root;
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 3fa18b7e9497..00d78b5161c0 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -792,6 +792,7 @@ struct drm_mode_config {
 
 	/* output poll support */
 	bool poll_enabled;
+	bool poll_running;
 	struct delayed_work output_poll_work;
 
 	/* pointers to standard properties */
@@ -887,14 +888,14 @@ extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_
 extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
 extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
 						   const struct drm_display_mode *mode);
-extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
+extern void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
 extern void drm_mode_config_init(struct drm_device *dev);
 extern void drm_mode_config_reset(struct drm_device *dev);
 extern void drm_mode_config_cleanup(struct drm_device *dev);
 extern void drm_mode_set_name(struct drm_display_mode *mode);
-extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
-extern int drm_mode_width(struct drm_display_mode *mode);
-extern int drm_mode_height(struct drm_display_mode *mode);
+extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
+extern int drm_mode_width(const struct drm_display_mode *mode);
+extern int drm_mode_height(const struct drm_display_mode *mode);
 
 /* for us by fb module */
 extern int drm_mode_attachmode_crtc(struct drm_device *dev,
@@ -919,12 +920,6 @@ extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
 extern void drm_mode_connector_list_update(struct drm_connector *connector);
 extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
 						struct edid *edid);
-extern int drm_connector_property_set_value(struct drm_connector *connector,
-					 struct drm_property *property,
-					 uint64_t value);
-extern int drm_connector_property_get_value(struct drm_connector *connector,
-					 struct drm_property *property,
-					 uint64_t *value);
 extern int drm_object_property_set_value(struct drm_mode_object *obj,
 					 struct drm_property *property,
 					 uint64_t val);
@@ -946,8 +941,6 @@ extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
 extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
 extern bool drm_crtc_in_use(struct drm_crtc *crtc);
 
-extern void drm_connector_attach_property(struct drm_connector *connector,
-					  struct drm_property *property, uint64_t init_val);
 extern void drm_object_attach_property(struct drm_mode_object *obj,
 				       struct drm_property *property,
 				       uint64_t init_val);
@@ -1037,6 +1030,7 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
 extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
 				    void *data, struct drm_file *file_priv);
 extern u8 *drm_find_cea_extension(struct edid *edid);
+extern u8 drm_match_cea_mode(struct drm_display_mode *to_match);
 extern bool drm_detect_hdmi_monitor(struct edid *edid);
 extern bool drm_detect_monitor_audio(struct edid *edid);
 extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
@@ -1053,6 +1047,7 @@ extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
 				int GTF_2C, int GTF_K, int GTF_2J);
 extern int drm_add_modes_noedid(struct drm_connector *connector,
 				int hdisplay, int vdisplay);
+extern uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode);
 
 extern int drm_edid_header_is_valid(const u8 *raw_edid);
 extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index e01cc80c9c30..f43d556bf40b 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -137,6 +137,8 @@ extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
 
 extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
 
+extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
+
 extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
 					  struct drm_mode_fb_cmd2 *mode_cmd);
 
@@ -162,6 +164,7 @@ extern int drm_helper_resume_force_mode(struct drm_device *dev);
 extern void drm_kms_helper_poll_init(struct drm_device *dev);
 extern void drm_kms_helper_poll_fini(struct drm_device *dev);
 extern void drm_helper_hpd_irq_event(struct drm_device *dev);
+extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
 
 extern void drm_kms_helper_poll_disable(struct drm_device *dev);
 extern void drm_kms_helper_poll_enable(struct drm_device *dev);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index fe061489f91f..e8e1417af3d9 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -25,6 +25,7 @@
 
 #include <linux/types.h>
 #include <linux/i2c.h>
+#include <linux/delay.h>
 
 /*
  * Unless otherwise noted, all values are from the DP 1.1a spec.  Note that
@@ -311,6 +312,14 @@
 #define MODE_I2C_READ	4
 #define MODE_I2C_STOP	8
 
+/**
+ * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
+ * 				 aux algorithm
+ * @running: set by the algo indicating whether an i2c transfer is ongoing
+ * 	     or whether the i2c bus is quiescent
+ * @address: i2c target address for the currently ongoing transfer
+ * @aux_ch: driver callback to transfer a single byte of the i2c payload
+ */
 struct i2c_algo_dp_aux_data {
 	bool running;
 	u16 address;
@@ -322,4 +331,34 @@ struct i2c_algo_dp_aux_data {
 int
 i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
 
+
+#define DP_LINK_STATUS_SIZE	   6
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+			  int lane_count);
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+			      int lane_count);
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+				     int lane);
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+					  int lane);
+
+#define DP_RECEIVER_CAP_SIZE	0xf
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate);
+int drm_dp_bw_code_to_link_rate(u8 link_bw);
+
+static inline int
+drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+}
+
+static inline u8
+drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+}
+
 #endif /* _DRM_DP_HELPER_H_ */
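
The new helpers factor the common link-training checks out of the drivers.
A hedged sketch of their intended use, assuming dpcd[] and link_status[]
have already been read from the sink over the AUX channel:

	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	u8 link_status[DP_LINK_STATUS_SIZE];
	int lane_count = drm_dp_max_lane_count(dpcd);

	if (!drm_dp_clock_recovery_ok(link_status, lane_count)) {
		/* Feed the per-lane adjustment requests into the next
		 * training iteration (retry loop not shown). */
		u8 v = drm_dp_get_adjust_request_voltage(link_status, 0);
		u8 p = drm_dp_get_adjust_request_pre_emphasis(link_status, 0);
	} else if (drm_dp_channel_eq_ok(link_status, lane_count)) {
		/* Both training phases have succeeded. */
	}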
diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h
index 3650d5d011ee..fce2ef3fdfff 100644
--- a/include/drm/drm_hashtab.h
+++ b/include/drm/drm_hashtab.h
@@ -61,5 +61,19 @@ extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
 extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
 extern void drm_ht_remove(struct drm_open_hash *ht);
 
+/*
+ * RCU-safe interface
+ *
+ * The user of this API needs to make sure that two or more instances of the
+ * hash table manipulation functions are never run simultaneously.
+ * The lookup function drm_ht_find_item_rcu may, however, run simultaneously
+ * with any of the manipulation functions as long as it's called from within
+ * an RCU read-locked section.
+ */
+#define drm_ht_insert_item_rcu drm_ht_insert_item
+#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
+#define drm_ht_remove_key_rcu drm_ht_remove_key
+#define drm_ht_remove_item_rcu drm_ht_remove_item
+#define drm_ht_find_item_rcu drm_ht_find_item
 
 #endif
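
A minimal sketch of the RCU contract: the caller still serializes all
manipulation, while lookups may proceed concurrently from an RCU read-side
critical section:

	struct drm_hash_item *hash;
	int ret;

	rcu_read_lock();
	ret = drm_ht_find_item_rcu(ht, key, &hash);
	rcu_read_unlock();

	if (ret == 0) {
		/* 'hash' was valid at lookup time; the embedding object's
		 * lifetime rules still apply after rcu_read_unlock(). */
	}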
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index 3c13a3a4b158..808dad29607a 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -85,4 +85,30 @@ struct exynos_drm_hdmi_pdata {
 	int (*get_hpd)(void);
 };
 
+/**
+ * Platform Specific Structure for DRM based IPP.
+ *
+ * @inv_pclk: if set to 1, invert the pixel clock
+ * @inv_vsync: if set to 1, invert the vsync signal for writeback
+ * @inv_href: if set to 1, invert the href signal
+ * @inv_hsync: if set to 1, invert the hsync signal for writeback
+ */
+struct exynos_drm_ipp_pol {
+	unsigned int inv_pclk;
+	unsigned int inv_vsync;
+	unsigned int inv_href;
+	unsigned int inv_hsync;
+};
+
+/**
+ * Platform Specific Structure for DRM based FIMC.
+ *
+ * @pol: current hardware block polarity settings.
+ * @clk_rate: current hardware clock rate.
+ */
+struct exynos_drm_fimc_pdata {
+	struct exynos_drm_ipp_pol pol;
+	int clk_rate;
+};
+
 #endif	/* _EXYNOS_DRM_H_ */
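
A hypothetical board-file snippet wiring up the new FIMC platform data;
the values are purely illustrative:

	static struct exynos_drm_fimc_pdata fimc_pdata = {
		.pol = {
			.inv_vsync = 1,	/* invert vsync for writeback */
		},
		.clk_rate = 166000000,
	};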
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 2e37e9f02e71..6eb76a1f11ab 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -3,7 +3,7 @@
 #ifndef _DRM_INTEL_GTT_H
 #define	_DRM_INTEL_GTT_H
 
-const struct intel_gtt {
+struct intel_gtt {
 	/* Size of memory reserved for graphics by the BIOS */
 	unsigned int stolen_size;
 	/* Total number of gtt entries. */
@@ -17,6 +17,7 @@ const struct intel_gtt {
 	unsigned int do_idle_maps : 1;
 	/* Share the scratch page dma with ppgtts. */
 	dma_addr_t scratch_page_dma;
+	struct page *scratch_page;
 	/* for ppgtt PDE access */
 	u32 __iomem *gtt;
 	/* needed for ioremap in drm/i915 */
@@ -39,10 +40,6 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
 #define AGP_DCACHE_MEMORY	1
 #define AGP_PHYS_MEMORY		2
 
-/* New caching attributes for gen6/sandybridge */
-#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
-#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
-
 /* flag for GFDT type */
 #define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
 
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index e8028ade567f..3cb5d848fb66 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -141,8 +141,6 @@ struct ttm_tt;
  * struct ttm_buffer_object
  *
  * @bdev: Pointer to the buffer object device structure.
- * @buffer_start: The virtual user-space start address of ttm_bo_type_user
- * buffers.
  * @type: The bo type.
  * @destroy: Destruction function. If NULL, kfree is used.
  * @num_pages: Actual number of pages.
@@ -172,7 +170,6 @@ struct ttm_tt;
  * @seq_valid: The value of @val_seq is valid. This value is protected by
  * the bo_device::lru_lock.
  * @reserved: Deadlock-free lock used for synchronization state transitions.
- * @sync_obj_arg: Opaque argument to synchronization object function.
  * @sync_obj: Pointer to a synchronization object.
  * @priv_flags: Flags describing buffer object internal state.
  * @vm_rb: Rb node for the vm rb tree.
@@ -200,7 +197,6 @@ struct ttm_buffer_object {
 
 	struct ttm_bo_global *glob;
 	struct ttm_bo_device *bdev;
-	unsigned long buffer_start;
 	enum ttm_bo_type type;
 	void (*destroy) (struct ttm_buffer_object *);
 	unsigned long num_pages;
@@ -255,7 +251,6 @@ struct ttm_buffer_object {
 	 * checking NULL while reserved but not holding the mentioned lock.
 	 */
 
-	void *sync_obj_arg;
 	void *sync_obj;
 	unsigned long priv_flags;
 
@@ -342,7 +337,6 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
  * @bo: The buffer object.
  * @placement: Proposed placement for the buffer object.
  * @interruptible: Sleep interruptible if sleeping.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  * Changes placement and caching policy of the buffer object
@@ -355,7 +349,7 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
  */
 extern int ttm_bo_validate(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement,
-				bool interruptible, bool no_wait_reserve,
+				bool interruptible,
 				bool no_wait_gpu);
 
 /**
@@ -429,8 +423,9 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
  * @no_wait: Return immediately if buffer is busy.
  *
  * Synchronizes a buffer object for CPU RW access. This means
- * blocking command submission that affects the buffer and
- * waiting for buffer idle. This lock is recursive.
+ * that any command submission affecting the buffer will return -EBUSY
+ * until ttm_bo_synccpu_write_release is called.
+ *
  * Returns
  * -EBUSY if the buffer is busy and no_wait is true.
  * -ERESTARTSYS if interrupted by a signal.
@@ -472,8 +467,6 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
  * @type: Requested type of buffer object.
  * @flags: Initial placement flags.
  * @page_alignment: Data alignment in pages.
- * @buffer_start: Virtual address of user space data backing a
- * user buffer object.
  * @interruptible: If needing to sleep to wait for GPU resources,
  * sleep interruptible.
  * @persistent_swap_storage: Usually the swap storage is deleted for buffers
@@ -505,7 +498,6 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
 			enum ttm_bo_type type,
 			struct ttm_placement *placement,
 			uint32_t page_alignment,
-			unsigned long buffer_start,
 			bool interruptible,
 			struct file *persistent_swap_storage,
 			size_t acc_size,
@@ -521,8 +513,6 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
  * @type: Requested type of buffer object.
  * @flags: Initial placement flags.
  * @page_alignment: Data alignment in pages.
- * @buffer_start: Virtual address of user space data backing a
- * user buffer object.
  * @interruptible: If needing to sleep while waiting for GPU resources,
  * sleep interruptible.
  * @persistent_swap_storage: Usually the swap storage is deleted for buffers
@@ -545,7 +535,6 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev,
 				enum ttm_bo_type type,
 				struct ttm_placement *placement,
 				uint32_t page_alignment,
-				unsigned long buffer_start,
 				bool interruptible,
 				struct file *persistent_swap_storage,
 				struct ttm_buffer_object **p_bo);
@@ -736,4 +725,18 @@ extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
 
 extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
 
+/**
+ * ttm_bo_is_reserved - return whether a ttm buffer object is reserved
+ *
+ * @bo:     The buffer object to check.
+ *
+ * This function returns whether a bo is reserved, and should only be used
+ * for printing an error on incorrect API usage (an unreserved bo), since
+ * there is no guarantee that it is the caller that holds the reservation.
+ */
+static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo)
+{
+	return atomic_read(&bo->reserved);
+}
+
 #endif
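
A minimal sketch of the intended use, checking for incorrect API usage in a hypothetical driver entry point (example_driver_touch_bo is made up for illustration):

#include <linux/bug.h>
#include <drm/ttm/ttm_bo_api.h>

static void example_driver_touch_bo(struct ttm_buffer_object *bo)
{
	/* Warn rather than assert: the helper cannot tell *who* holds
	 * the reservation, only that somebody does.
	 */
	if (WARN_ON_ONCE(!ttm_bo_is_reserved(bo)))
		return;

	/* ... operate on the reserved bo ... */
}
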
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index d803b92b0324..e3a43a47d78c 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -394,7 +394,7 @@ struct ttm_bo_driver {
 	 */
 	int (*move) (struct ttm_buffer_object *bo,
 		     bool evict, bool interruptible,
-		     bool no_wait_reserve, bool no_wait_gpu,
+		     bool no_wait_gpu,
 		     struct ttm_mem_reg *new_mem);
 
 	/**
@@ -422,10 +422,10 @@ struct ttm_bo_driver {
 	 * documentation.
 	 */
 
-	bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
-	int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
+	bool (*sync_obj_signaled) (void *sync_obj);
+	int (*sync_obj_wait) (void *sync_obj,
 			      bool lazy, bool interruptible);
-	int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
+	int (*sync_obj_flush) (void *sync_obj);
 	void (*sync_obj_unref) (void **sync_obj);
 	void *(*sync_obj_ref) (void *sync_obj);
 
@@ -521,8 +521,6 @@ struct ttm_bo_global {
  * lru_lock: Spinlock that protects the buffer+device lru lists and
  * ddestroy lists.
  * @val_seq: Current validation sequence.
- * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
- * If a GPU lockup has been detected, this is forced to 0.
  * @dev_mapping: A pointer to the struct address_space representing the
  * device address space.
  * @wq: Work queue structure for the delayed delete workqueue.
@@ -556,7 +554,6 @@ struct ttm_bo_device {
 	 * Protected by load / firstopen / lastclose / unload sync.
 	 */
 
-	bool nice_mode;
 	struct address_space *dev_mapping;
 
 	/*
@@ -706,7 +703,6 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
  * @proposed_placement: Proposed new placement for the buffer object.
  * @mem: A struct ttm_mem_reg.
  * @interruptible: Sleep interruptible while waiting.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  * Allocate memory space for the buffer object pointed to by @bo, using
@@ -722,27 +718,13 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement,
 				struct ttm_mem_reg *mem,
 				bool interruptible,
-				bool no_wait_reserve, bool no_wait_gpu);
+				bool no_wait_gpu);
 
 extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
 			   struct ttm_mem_reg *mem);
 extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *mem);
 
-/**
- * ttm_bo_wait_for_cpu
- *
- * @bo: Pointer to a struct ttm_buffer_object.
- * @no_wait: Don't sleep while waiting.
- *
- * Wait until a buffer object is no longer sync'ed for CPU access.
- * Returns:
- * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
- * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
- */
-
-extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
-
 extern void ttm_bo_global_release(struct drm_global_reference *ref);
 extern int ttm_bo_global_init(struct drm_global_reference *ref);
 
@@ -918,7 +900,6 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -933,15 +914,14 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
  */
 
 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-			   bool evict, bool no_wait_reserve,
-			   bool no_wait_gpu, struct ttm_mem_reg *new_mem);
+			   bool evict, bool no_wait_gpu,
+			   struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_move_memcpy
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -956,8 +936,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  */
 
 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-			      bool evict, bool no_wait_reserve,
-			      bool no_wait_gpu, struct ttm_mem_reg *new_mem);
+			      bool evict, bool no_wait_gpu,
+			      struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_free_old_node
@@ -973,10 +953,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @sync_obj: A sync object that signals when moving is complete.
- * @sync_obj_arg: An argument to pass to the sync object idle / wait
- * functions.
  * @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -990,9 +967,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
 
 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 				     void *sync_obj,
-				     void *sync_obj_arg,
-				     bool evict, bool no_wait_reserve,
-				     bool no_wait_gpu,
+				     bool evict, bool no_wait_gpu,
 				     struct ttm_mem_reg *new_mem);
 /**
  * ttm_io_prot
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 1926cae373ba..547e19f06e57 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -39,8 +39,6 @@
  *
  * @head:           list head for thread-private list.
  * @bo:             refcounted buffer object pointer.
- * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
- * adding a new sync object.
  * @reserved:       Indicates whether @bo has been reserved for validation.
  * @removed:        Indicates whether @bo has been removed from lru lists.
  * @put_count:      Number of outstanding references on bo::list_kref.
@@ -50,7 +48,6 @@
 struct ttm_validate_buffer {
 	struct list_head head;
 	struct ttm_buffer_object *bo;
-	void *new_sync_obj_arg;
 	bool reserved;
 	bool removed;
 	int put_count;
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index d6d1da468c97..72dcbe81dd07 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -60,7 +60,6 @@ struct ttm_mem_shrink {
  * for the GPU, and this will otherwise block other workqueue tasks(?)
  * At this point we use only a single-threaded workqueue.
  * @work: The workqueue callback for the shrink queue.
- * @queue: Wait queue for processes suspended waiting for memory.
  * @lock: Lock to protect the @shrink - and the memory accounting members,
  * that is, essentially the whole structure with some exceptions.
  * @zones: Array of pointers to accounting zones.
@@ -80,7 +79,6 @@ struct ttm_mem_global {
 	struct ttm_mem_shrink *shrink;
 	struct workqueue_struct *swap_queue;
 	struct work_struct work;
-	wait_queue_head_t queue;
 	spinlock_t lock;
 	struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
 	unsigned int num_zones;
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index b01c563b2751..fc0cf0649901 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -40,6 +40,7 @@
 #include <linux/list.h>
 #include <drm/drm_hashtab.h>
 #include <linux/kref.h>
+#include <linux/rcupdate.h>
 #include <ttm/ttm_memory.h>
 
 /**
@@ -120,6 +121,7 @@ struct ttm_object_device;
  */
 
 struct ttm_base_object {
+	struct rcu_head rhead;
 	struct drm_hash_item hash;
 	enum ttm_object_type object_type;
 	bool shareable;
@@ -268,4 +270,6 @@ extern struct ttm_object_device *ttm_object_device_init
 
 extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
 
+#define ttm_base_object_kfree(__object, __base)\
+	kfree_rcu(__object, __base.rhead)
 #endif
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
index f83f793223ff..c8e1831d7572 100644
--- a/include/linux/dma-attrs.h
+++ b/include/linux/dma-attrs.h
@@ -17,6 +17,7 @@ enum dma_attr {
 	DMA_ATTR_NON_CONSISTENT,
 	DMA_ATTR_NO_KERNEL_MAPPING,
 	DMA_ATTR_SKIP_CPU_SYNC,
+	DMA_ATTR_FORCE_CONTIGUOUS,
 	DMA_ATTR_MAX,
 };
 
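
A minimal sketch of requesting such an allocation with the dma_attrs API of this kernel generation:

#include <linux/dma-mapping.h>

static void *example_alloc_contig(struct device *dev, size_t size,
				  dma_addr_t *dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);

	/* Force the backing pages to be contiguous in physical memory,
	 * not merely contiguous in the device's DMA address space.
	 */
	dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL, &attrs);
}
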
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 65af6887872f..4972e6e9ca93 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -111,4 +111,25 @@ static inline int kref_put_mutex(struct kref *kref,
 	}
 	return 0;
 }
+
+/**
+ * kref_get_unless_zero - Increment refcount for object unless it is zero.
+ * @kref: object.
+ *
+ * Return non-zero if the increment succeeded. Otherwise return 0.
+ *
+ * This function is intended to simplify locking around refcounting for
+ * objects that can be looked up from a lookup structure, and which are
+ * removed from that lookup structure in the object destructor.
+ * Operations on such objects require at least a read lock around
+ * lookup + kref_get, and a write lock around kref_put + remove from lookup
+ * structure. Furthermore, RCU implementations become extremely tricky.
+ * With a lookup followed by a kref_get_unless_zero *with return value check*,
+ * locking in the kref_put path can be deferred to the actual removal from
+ * the lookup structure, and RCU lookups become trivial.
+ */
+static inline int __must_check kref_get_unless_zero(struct kref *kref)
+{
+	return atomic_add_unless(&kref->refcount, 1, 0);
+}
 #endif /* _KREF_H_ */
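
A minimal sketch of the lookup pattern described above; the object type, list and lock are assumptions for illustration:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct example_obj {
	struct kref kref;
	struct list_head node;
	int id;
};

static struct example_obj *example_lookup_get(struct list_head *head,
					      spinlock_t *lock, int id)
{
	struct example_obj *obj, *found = NULL;

	spin_lock(lock);	/* read-side lock around lookup + get */
	list_for_each_entry(obj, head, node) {
		if (obj->id == id && kref_get_unless_zero(&obj->kref)) {
			found = obj;	/* ref taken; destructor cannot run */
			break;
		}
	}
	spin_unlock(lock);
	return found;
}
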
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 1e3481edf062..8d1e2bbee83a 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -778,6 +778,7 @@ struct drm_event_vblank {
 #define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
 #define DRM_CAP_DUMB_PREFER_SHADOW 0x4
 #define DRM_CAP_PRIME 0x5
+#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
 
 #define DRM_PRIME_CAP_IMPORT 0x1
 #define DRM_PRIME_CAP_EXPORT 0x2
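
From userspace, the new capability would be probed roughly as follows. This sketch uses libdrm's drmGetCap and assumes, per the cap's name, that a value of 1 means vblank timestamps come from CLOCK_MONOTONIC:

#include <stdint.h>
#include <xf86drm.h>

int timestamps_are_monotonic(int fd)
{
	uint64_t value = 0;

	/* Older kernels do not know the cap and return an error;
	 * treat that as CLOCK_REALTIME timestamps.
	 */
	if (drmGetCap(fd, DRM_CAP_TIMESTAMP_MONOTONIC, &value) != 0)
		return 0;
	return value == 1;
}
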
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index c0494d586e23..e7f52c334005 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -133,17 +133,26 @@ struct drm_exynos_g2d_cmd {
 	__u32	data;
 };
 
+enum drm_exynos_g2d_buf_type {
+	G2D_BUF_USERPTR = 1 << 31,
+};
+
 enum drm_exynos_g2d_event_type {
 	G2D_EVENT_NOT,
 	G2D_EVENT_NONSTOP,
 	G2D_EVENT_STOP,		/* not yet */
 };
 
+struct drm_exynos_g2d_userptr {
+	unsigned long userptr;
+	unsigned long size;
+};
+
 struct drm_exynos_g2d_set_cmdlist {
 	__u64					cmd;
-	__u64					cmd_gem;
+	__u64					cmd_buf;
 	__u32					cmd_nr;
-	__u32					cmd_gem_nr;
+	__u32					cmd_buf_nr;
 
 	/* for g2d event */
 	__u64					event_type;
@@ -154,6 +163,170 @@ struct drm_exynos_g2d_exec {
 	__u64					async;
 };
 
+enum drm_exynos_ops_id {
+	EXYNOS_DRM_OPS_SRC,
+	EXYNOS_DRM_OPS_DST,
+	EXYNOS_DRM_OPS_MAX,
+};
+
+struct drm_exynos_sz {
+	__u32	hsize;
+	__u32	vsize;
+};
+
+struct drm_exynos_pos {
+	__u32	x;
+	__u32	y;
+	__u32	w;
+	__u32	h;
+};
+
+enum drm_exynos_flip {
+	EXYNOS_DRM_FLIP_NONE = (0 << 0),
+	EXYNOS_DRM_FLIP_VERTICAL = (1 << 0),
+	EXYNOS_DRM_FLIP_HORIZONTAL = (1 << 1),
+};
+
+enum drm_exynos_degree {
+	EXYNOS_DRM_DEGREE_0,
+	EXYNOS_DRM_DEGREE_90,
+	EXYNOS_DRM_DEGREE_180,
+	EXYNOS_DRM_DEGREE_270,
+};
+
+enum drm_exynos_planer {
+	EXYNOS_DRM_PLANAR_Y,
+	EXYNOS_DRM_PLANAR_CB,
+	EXYNOS_DRM_PLANAR_CR,
+	EXYNOS_DRM_PLANAR_MAX,
+};
+
+/**
+ * A structure for the ipp supported property list.
+ *
+ * @version: version of this structure.
+ * @ipp_id: id of the ipp driver.
+ * @count: number of ipp drivers.
+ * @writeback: flag indicating writeback support.
+ * @flip: flag indicating flip support.
+ * @degree: flag holding supported rotation degrees.
+ * @csc: flag indicating csc (color space conversion) support.
+ * @crop: flag indicating crop support.
+ * @scale: flag indicating scale support.
+ * @refresh_min: minimum refresh rate in hz.
+ * @refresh_max: maximum refresh rate in hz.
+ * @crop_min: minimum crop resolution.
+ * @crop_max: maximum crop resolution.
+ * @scale_min: minimum scale resolution.
+ * @scale_max: maximum scale resolution.
+ */
+struct drm_exynos_ipp_prop_list {
+	__u32	version;
+	__u32	ipp_id;
+	__u32	count;
+	__u32	writeback;
+	__u32	flip;
+	__u32	degree;
+	__u32	csc;
+	__u32	crop;
+	__u32	scale;
+	__u32	refresh_min;
+	__u32	refresh_max;
+	__u32	reserved;
+	struct drm_exynos_sz	crop_min;
+	struct drm_exynos_sz	crop_max;
+	struct drm_exynos_sz	scale_min;
+	struct drm_exynos_sz	scale_max;
+};
+
+/**
+ * A structure for ipp configuration.
+ *
+ * @ops_id: operation direction (source or destination).
+ * @flip: mirror/flip property.
+ * @degree: rotation degree property.
+ * @fmt: image format property.
+ * @sz: image size property.
+ * @pos: image position property (cropped for src, scaled for dst).
+ */
+struct drm_exynos_ipp_config {
+	enum drm_exynos_ops_id ops_id;
+	enum drm_exynos_flip	flip;
+	enum drm_exynos_degree	degree;
+	__u32	fmt;
+	struct drm_exynos_sz	sz;
+	struct drm_exynos_pos	pos;
+};
+
+enum drm_exynos_ipp_cmd {
+	IPP_CMD_NONE,
+	IPP_CMD_M2M,
+	IPP_CMD_WB,
+	IPP_CMD_OUTPUT,
+	IPP_CMD_MAX,
+};
+
+/**
+ * A structure for ipp property.
+ *
+ * @config: source and destination configurations.
+ * @cmd: operation command.
+ * @ipp_id: id of the ipp driver.
+ * @prop_id: id of the property.
+ * @refresh_rate: refresh rate in hz.
+ */
+struct drm_exynos_ipp_property {
+	struct drm_exynos_ipp_config config[EXYNOS_DRM_OPS_MAX];
+	enum drm_exynos_ipp_cmd	cmd;
+	__u32	ipp_id;
+	__u32	prop_id;
+	__u32	refresh_rate;
+};
+
+enum drm_exynos_ipp_buf_type {
+	IPP_BUF_ENQUEUE,
+	IPP_BUF_DEQUEUE,
+};
+
+/**
+ * A structure for ipp buffer operations.
+ *
+ * @ops_id: operation direction (source or destination).
+ * @buf_type: whether the buffer is being enqueued or dequeued.
+ * @prop_id: id of the property.
+ * @buf_id: id of the buffer.
+ * @handle: GEM handle for each of the Y, Cb and Cr planes.
+ * @user_data: user data.
+ */
+struct drm_exynos_ipp_queue_buf {
+	enum drm_exynos_ops_id	ops_id;
+	enum drm_exynos_ipp_buf_type	buf_type;
+	__u32	prop_id;
+	__u32	buf_id;
+	__u32	handle[EXYNOS_DRM_PLANAR_MAX];
+	__u32	reserved;
+	__u64	user_data;
+};
+
+enum drm_exynos_ipp_ctrl {
+	IPP_CTRL_PLAY,
+	IPP_CTRL_STOP,
+	IPP_CTRL_PAUSE,
+	IPP_CTRL_RESUME,
+	IPP_CTRL_MAX,
+};
+
+/**
+ * A structure for ipp start/stop operations.
+ *
+ * @prop_id: id of the property.
+ * @ctrl: control command (play, stop, pause or resume).
+ */
+struct drm_exynos_ipp_cmd_ctrl {
+	__u32	prop_id;
+	enum drm_exynos_ipp_ctrl	ctrl;
+};
+
 #define DRM_EXYNOS_GEM_CREATE		0x00
 #define DRM_EXYNOS_GEM_MAP_OFFSET	0x01
 #define DRM_EXYNOS_GEM_MMAP		0x02
@@ -166,6 +339,12 @@ struct drm_exynos_g2d_exec {
 #define DRM_EXYNOS_G2D_SET_CMDLIST	0x21
 #define DRM_EXYNOS_G2D_EXEC		0x22
 
+/* IPP - Image Post Processing */
+#define DRM_EXYNOS_IPP_GET_PROPERTY	0x30
+#define DRM_EXYNOS_IPP_SET_PROPERTY	0x31
+#define DRM_EXYNOS_IPP_QUEUE_BUF	0x32
+#define DRM_EXYNOS_IPP_CMD_CTRL	0x33
+
 #define DRM_IOCTL_EXYNOS_GEM_CREATE		DRM_IOWR(DRM_COMMAND_BASE + \
 		DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
 
@@ -188,8 +367,18 @@ struct drm_exynos_g2d_exec {
 #define DRM_IOCTL_EXYNOS_G2D_EXEC		DRM_IOWR(DRM_COMMAND_BASE + \
 		DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)
 
+#define DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY	DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_GET_PROPERTY, struct drm_exynos_ipp_prop_list)
+#define DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY	DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_SET_PROPERTY, struct drm_exynos_ipp_property)
+#define DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF	DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_QUEUE_BUF, struct drm_exynos_ipp_queue_buf)
+#define DRM_IOCTL_EXYNOS_IPP_CMD_CTRL		DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_CMD_CTRL, struct drm_exynos_ipp_cmd_ctrl)
+
 /* EXYNOS specific events */
 #define DRM_EXYNOS_G2D_EVENT		0x80000000
+#define DRM_EXYNOS_IPP_EVENT		0x80000001
 
 struct drm_exynos_g2d_event {
 	struct drm_event	base;
@@ -200,4 +389,14 @@ struct drm_exynos_g2d_event {
 	__u32			reserved;
 };
 
+struct drm_exynos_ipp_event {
+	struct drm_event	base;
+	__u64			user_data;
+	__u32			tv_sec;
+	__u32			tv_usec;
+	__u32			prop_id;
+	__u32			reserved;
+	__u32			buf_id[EXYNOS_DRM_OPS_MAX];
+};
+
 #endif /* _UAPI_EXYNOS_DRM_H_ */
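
Taken together, the new IPP ioctls suggest the following userspace flow: set a property, enqueue a buffer, then start the command. This is a hedged sketch; the field values and GEM handle are illustrative, and it is assumed that the IOWR SET_PROPERTY call returns the allocated prop_id through the struct:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/exynos_drm.h>

static int example_ipp_m2m(int fd, __u32 src_gem_handle)
{
	struct drm_exynos_ipp_property prop;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_cmd_ctrl ctrl;

	memset(&prop, 0, sizeof(prop));
	prop.cmd = IPP_CMD_M2M;
	prop.ipp_id = 0;	/* assumed: 0 lets the kernel pick a driver */
	prop.config[EXYNOS_DRM_OPS_SRC].ops_id = EXYNOS_DRM_OPS_SRC;
	prop.config[EXYNOS_DRM_OPS_DST].ops_id = EXYNOS_DRM_OPS_DST;
	/* ... fill fmt, sz, pos, flip and degree for both directions ... */
	if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop))
		return -1;

	memset(&qbuf, 0, sizeof(qbuf));
	qbuf.ops_id = EXYNOS_DRM_OPS_SRC;
	qbuf.buf_type = IPP_BUF_ENQUEUE;
	qbuf.prop_id = prop.prop_id;
	qbuf.handle[EXYNOS_DRM_PLANAR_Y] = src_gem_handle;
	if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf))
		return -1;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.prop_id = prop.prop_id;
	ctrl.ctrl = IPP_CTRL_PLAY;
	return ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
}
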
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 4322b1e7d2ed..b746a3cf5fa9 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -306,6 +306,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_SEMAPHORES	 20
 #define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
 #define I915_PARAM_RSVD_FOR_FUTURE_USE	 22
+#define I915_PARAM_HAS_SECURE_BATCHES	 23
 
 typedef struct drm_i915_getparam {
 	int param;
@@ -671,6 +672,11 @@ struct drm_i915_gem_execbuffer2 {
 /** Resets the SO write offset registers for transform feedback on gen7. */
 #define I915_EXEC_GEN7_SOL_RESET	(1<<8)
 
+/** Request a privileged ("secure") batch buffer. Note that this is only
+ * available to DRM_ROOT_ONLY | DRM_MASTER processes.
+ */
+#define I915_EXEC_SECURE		(1<<9)
+
 #define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \
 	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
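
A userspace sketch of probing the new parameter and requesting a secure batch; buffer-object and relocation setup is elided, and the caller is assumed to be DRM master or root:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_exec_secure(int fd, struct drm_i915_gem_execbuffer2 *eb)
{
	struct drm_i915_getparam gp;
	int has_secure = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = I915_PARAM_HAS_SECURE_BATCHES;
	gp.value = &has_secure;
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && has_secure)
		eb->flags |= I915_EXEC_SECURE;

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, eb);
}
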
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 4766c0f6a838..eeda91774c8a 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -913,9 +913,11 @@ struct drm_radeon_gem_va {
 /* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
 #define RADEON_CS_KEEP_TILING_FLAGS 0x01
 #define RADEON_CS_USE_VM            0x02
+#define RADEON_CS_END_OF_FRAME      0x04 /* a userspace hint that this CS ends the current frame */
 /* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
 #define RADEON_CS_RING_GFX          0
 #define RADEON_CS_RING_COMPUTE      1
+#define RADEON_CS_RING_DMA          2
 /* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */
 /* 0 = normal, + = higher priority, - = lower priority */
 
@@ -966,6 +968,10 @@ struct drm_radeon_cs {
 #define RADEON_INFO_MAX_PIPES		0x10
 /* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */
 #define RADEON_INFO_TIMESTAMP		0x11
+/* max shader engines (SE) - needed for geometry shaders, etc. */
+#define RADEON_INFO_MAX_SE		0x12
+/* max shader arrays (SH) per shader engine */
+#define RADEON_INFO_MAX_SH_PER_SE	0x13
 
 struct drm_radeon_info {
 	uint32_t		request;
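
A sketch of querying one of the new INFO requests; it is assumed, as with the existing requests, that the ioctl writes the 32-bit result through the user pointer stored in 'value':

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

static int example_query_max_se(int fd, uint32_t *max_se)
{
	struct drm_radeon_info info;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_MAX_SE;
	info.value = (uintptr_t)max_se;	/* user pointer to the result */
	return ioctl(fd, DRM_IOCTL_RADEON_INFO, &info);
}
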